index int64 (0–100k) | blob_id stringlengths (40–40) | code stringlengths (7–7.27M) | steps listlengths (1–1.25k) | error bool (2 classes) |
---|---|---|---|---|
98,400 |
e59caaa334c664fef6f61b9d304d47be7c3463b6
|
# Division operator /
a = 10
b = 2
print(a / b)
# Floor division operator //
a = 9
b = 3
print(a / b)
# 3.0
print(a // b)
# 3
# Modulo (remainder) operator
print(10 / 3) # 3.3333333333333335
print(10 % 3) # 1
print(3 / 10) # 0.3
print(3 % 10) # 3
print(-10 % 3) # 2: -10 = 3 * (-4) + 2
print(10 % -3) # -2: 10 = -3 * (-4) + (-2)
# Assignment operators
a = b = 5
print(a, b) # 5 5
a, b = 5, 10
print(a, b) # 5 10
# swap a and b
a, b = b, a
print(a, b)
# 10 5
# Augmented assignment operators
a = 5
a += 2 # a = 5 + 2 = 7
print(a)
b = 10
b *= 2
print(b) # 20
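# Worked check of the identity behind floor division and modulo:
# for nonzero b, a == b * (a // b) + (a % b), and a % b takes the sign of b.
print(-10 == 3 * (-10 // 3) + (-10 % 3)) # True
print(10 == -3 * (10 // -3) + (10 % -3)) # True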
|
[4-step trace: the code above followed by progressively tokenized variants] | false |
98,401 |
1066b16c2dbbca78dfcae0b8831b2c042701a807
|
from mVray import vrayFrameBuffers as vfb
from PySide import QtGui
from PySide import QtCore
import maya.cmds as cmds
import maya.OpenMayaUI as mui
import shiboken
import yaml
import os
########################################################################
############################### GUI ####################################
########################################################################
def getMayaWindow():
pointer = mui.MQtUtil.mainWindow()
return shiboken.wrapInstance(long(pointer), QtGui.QWidget)
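## note: shiboken.wrapInstance + long() is the Maya/PySide (Python 2) pattern used throughout this
## script; on PySide2-era Maya (2017+) the equivalent would be shiboken2.wrapInstance(int(pointer),
## QtWidgets.QWidget) -- mentioned as context only, not used here.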
class UtilityToolBoxUI(QtGui.QDialog):
def __init__(self, parent=getMayaWindow()):
super(UtilityToolBoxUI, self).__init__(parent)
getShowInfo()
self.setWindowTitle("Utility Toolbox")
self.setWindowFlags(QtCore.Qt.Tool) # makes it a tool window so it will stay on top of maya
self.setAttribute(QtCore.Qt.WA_DeleteOnClose) # deletes UI when closed
########### Checkbox Label List #####################################
self.topList = ["Red", "Green", "Blue", "White", "Black"]
self.middleTopList = ["Shadow", "Contact_Shadow", "Fresnel", "Reflection_Occ"]
self.middleBotList = ["Shadow_Catcher", "Plate_Projection", "Reflection_Catcher"]
self.bottomList = ["Ref_Spheres", "UV"]
self.userList = []
#############################################################################
self.createLayout() # runs function below
################################################################################
##################### Layout Creation ##########################################
################################################################################
def createLayout(self):
layout = QtGui.QVBoxLayout() # main layout
self.setMinimumHeight(650)
self.setMinimumWidth(750)
layout.setSpacing(0)
########### Catch All Checkboxes Here ################
self.cbButtonList = {}
self.getState = {}
self.userListCheckBox = {}
self.exportShadDict = {}
############ Save/Load Preset ##########################
radioLayout = QtGui.QHBoxLayout()
layout.addLayout(radioLayout)
spacer = QtGui.QSpacerItem(175,0)
radioLayout.addSpacerItem(spacer)
radioLabel = QtGui.QLabel("Save Preset")
radioLayout.addWidget(radioLabel)
font = QtGui.QFont()
font.setBold(True)
font.setPointSize(12)
radioLabel.setFont(font)
radioLabel.setMaximumWidth(100)
radioGroup = QtGui.QButtonGroup(self)
self.showRadio = QtGui.QRadioButton("Show")
self.showRadio.setMaximumWidth(50)
self.showRadio.setMinimumWidth(50)
self.showRadio.setChecked(True)
self.showRadio.toggled.connect(self.loadPreset) ## clicked
radioGroup.addButton(self.showRadio)
self.seqRadio = QtGui.QRadioButton("Seq")
self.seqRadio.setMaximumWidth(50)
self.seqRadio.setMinimumWidth(50)
self.seqRadio.toggled.connect(self.loadPreset) ## clicked
radioGroup.addButton(self.seqRadio)
self.shotRadio = QtGui.QRadioButton("Shot")
self.shotRadio.setMaximumWidth(50)
self.shotRadio.setMinimumWidth(50)
self.shotRadio.toggled.connect(self.loadPreset) ## clicked
radioGroup.addButton(self.shotRadio)
self.personalRadio = QtGui.QRadioButton("Personal")
self.personalRadio.setMaximumWidth(50)
self.personalRadio.setMinimumWidth(75)
self.personalRadio.toggled.connect(self.loadPreset) ## clicked
radioGroup.addButton(self.personalRadio)
radioLayout.addWidget(self.showRadio)
radioLayout.addWidget(self.seqRadio)
radioLayout.addWidget(self.shotRadio)
radioLayout.addWidget(self.personalRadio)
spacer2 = QtGui.QSpacerItem(15,0)
radioLayout.addSpacerItem(spacer2)
saveButton = QtGui.QPushButton("Save")
saveButton.setMaximumWidth(200)
radioLayout.addWidget(saveButton)
saveButton.clicked.connect(self.savePreset) ## clicked
spacer3 = QtGui.QSpacerItem(150,0)
radioLayout.addSpacerItem(spacer3)
#################### Top Frame ##############################################
self.top_frame = QtGui.QFrame()
self.top_frame.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
layout.addWidget(self.top_frame)
self.top_frame.setLayout(QtGui.QHBoxLayout())
tl = FrameLabel("mask_label", "LightMtls", self.top_frame)
tl.frameLabelVarName.mouseReleaseEvent = self.topToggle
self.topListCheckBox = {}
for x in self.topList:
cb = UtilCreateCheckBox(x, x, self.top_frame)
self.topListCheckBox[x] = cb.buttonVarName
setattr(self, x, cb)
self.cbButtonList.update(self.topListCheckBox)
####################### Middle Top Frame #################################
self.middleTop_frame = QtGui.QFrame()
self.middleTop_frame.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
layout.addWidget(self.middleTop_frame)
self.middleTop_frame.setLayout(QtGui.QHBoxLayout())
mtl = FrameLabel("RE_label", "RenderElem", self.middleTop_frame)
mtl.frameLabelVarName.mouseReleaseEvent = self.midTopToggle
self.midTopListCheckBox = {}
for x in self.middleTopList:
cb = UtilCreateCheckBox(x, x, self.middleTop_frame)
self.midTopListCheckBox[x] = cb.buttonVarName
setattr(self, x, cb)
self.cbButtonList.update(self.midTopListCheckBox)
########################## Middle Bottom Frame ##########################################
self.middleBot_frame = QtGui.QFrame()
self.middleBot_frame.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
layout.addWidget(self.middleBot_frame)
self.middleBot_frame.setLayout(QtGui.QHBoxLayout())
mbl = FrameLabel("Shader_label", "Shaders", self.middleBot_frame)
mbl.frameLabelVarName.mouseReleaseEvent = self.midBotToggle
self.midBotListCheckBox = {}
for x in self.middleBotList:
cb = UtilCreateCheckBox(x, x, self.middleBot_frame)
self.midBotListCheckBox[x] = cb.buttonVarName
setattr(self, x, cb)
self.cbButtonList.update(self.midBotListCheckBox)
############################ Bottom Frame ##########################################
self.bottom_frame = QtGui.QFrame()
self.bottom_frame.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
layout.addWidget(self.bottom_frame)
self.bottom_frame.setLayout(QtGui.QHBoxLayout())
bl = FrameLabel("Util_label", "Utilities", self.bottom_frame)
bl.frameLabelVarName.mouseReleaseEvent = self.bottomToggle
self.bottomListCheckBox = {}
for x in self.bottomList:
cb = UtilCreateCheckBox(x, x, self.bottom_frame)
self.bottomListCheckBox[x] = cb.buttonVarName
setattr(self, x, cb)
self.cbButtonList.update(self.bottomListCheckBox)
########### User Export Buttons #################################################
exportLayout = QtGui.QHBoxLayout()
layout.addLayout(exportLayout)
spacer4 = QtGui.QSpacerItem(125,50)
exportLayout.addSpacerItem(spacer4)
exportLabel = QtGui.QLabel("Add User Shader")
exportLayout.addWidget(exportLabel)
font2 = QtGui.QFont()
font2.setBold(True)
font2.setPointSize(12)
exportLabel.setFont(font2)
exportLabel.setMaximumWidth(150)
spacer18 = QtGui.QSpacerItem(10,0)
exportLayout.addSpacerItem(spacer18)
self.exportWindow = QtGui.QLineEdit()
self.exportWindow.setMaximumWidth(200)
self.exportWindow.setMaximumHeight(20)
exportLayout.addWidget(self.exportWindow)
spacer17 = QtGui.QSpacerItem(10,0)
exportLayout.addSpacerItem(spacer17)
exportButton = QtGui.QPushButton("Export")
exportButton.setMaximumWidth(100)
exportLayout.addWidget(exportButton)
exportButton.clicked.connect(self.exportNetwork) ## clicked
deleteButton = QtGui.QPushButton("Delete")
deleteButton.setMaximumWidth(100)
exportLayout.addWidget(deleteButton)
deleteButton.clicked.connect(self.youSure) ## clicked
spacer5 = QtGui.QSpacerItem(125,0)
exportLayout.addSpacerItem(spacer5)
############################################################################
radioLayout2 = QtGui.QHBoxLayout()
layout.addLayout(radioLayout2)
spacer6 = QtGui.QSpacerItem(150,0)
radioLayout2.addSpacerItem(spacer6)
radioLabel2 = QtGui.QLabel("Export to")
radioLayout2.addWidget(radioLabel2)
font2 = QtGui.QFont()
font2.setBold(True)
font2.setPointSize(12)
radioLabel2.setFont(font2)
radioLabel2.setMaximumWidth(100)
radioGroup2 = QtGui.QButtonGroup(self)
self.showRadio2 = QtGui.QRadioButton("Show")
self.showRadio2.setMaximumWidth(50)
self.showRadio2.setMinimumWidth(50)
self.showRadio2.toggled.connect(self.createUserCheckboxes) ## clicked
radioGroup2.addButton(self.showRadio2)
self.seqRadio2 = QtGui.QRadioButton("Seq")
self.seqRadio2.setMaximumWidth(50)
self.seqRadio2.setMinimumWidth(50)
self.seqRadio2.toggled.connect(self.createUserCheckboxes) ## clicked
radioGroup2.addButton(self.seqRadio2)
self.shotRadio2 = QtGui.QRadioButton("Shot")
self.shotRadio2.setMaximumWidth(50)
self.shotRadio2.setMinimumWidth(50)
self.shotRadio2.toggled.connect(self.createUserCheckboxes) ## clicked
radioGroup2.addButton(self.shotRadio2)
self.personalRadio2 = QtGui.QRadioButton("Personal")
self.personalRadio2.setMaximumWidth(50)
self.personalRadio2.setMinimumWidth(75)
self.personalRadio2.toggled.connect(self.createUserCheckboxes) ## clicked
radioGroup2.addButton(self.personalRadio2)
radioLayout2.addWidget(self.showRadio2)
radioLayout2.addWidget(self.seqRadio2)
radioLayout2.addWidget(self.shotRadio2)
radioLayout2.addWidget(self.personalRadio2)
self.showRadio2.setChecked(True)
spacer7 = QtGui.QSpacerItem(250,0)
radioLayout2.addSpacerItem(spacer7)
########## User Frame #########################################################
self.user_frame = QtGui.QFrame()
self.user_frame.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
layout.addWidget(self.user_frame)
self.user_frame.setLayout(QtGui.QHBoxLayout())
ul = FrameLabel("User_label", "User", self.user_frame)
ul.frameLabelVarName.mouseReleaseEvent = self.userToggle
try:
self.userInfile = open('%s/userExport.yml' % getShowInfo.jobPath)
self.userInSettings = yaml.load(self.userInfile)
self.userInfile.close()
for x in self.userInSettings.keys():
cb = UtilCreateCheckBox(x, x, self.user_frame)
self.userListCheckBox[x] = cb.buttonVarName
#setattr(self, x, cb)
self.cbButtonList.update(self.userListCheckBox)
except: pass
######################### Un/Check All buttons ##################################################
allCheckLayout = QtGui.QHBoxLayout()
layout.addLayout(allCheckLayout)
self.checkAll_button = QtGui.QPushButton("Check All")
allCheckLayout.layout().addWidget(self.checkAll_button)
self.checkAll_button.clicked.connect(self.checkAllFunction) ## clicked
self.checkNone_button = QtGui.QPushButton("Check None")
allCheckLayout.layout().addWidget(self.checkNone_button)
self.checkNone_button.clicked.connect(self.checkNoneFunction) ## clicked
####################### Import button #####################################################
self.import_button = QtGui.QPushButton("Import")
layout.addWidget(self.import_button)
self.import_button.setMinimumHeight(50)
self.import_button.clicked.connect(self.importButtonFunction) ## clicked
####################### Output Window ####################################################
self.outWindow = QtGui.QTextEdit()
self.outWindow.setReadOnly(True)
layout.addWidget(self.outWindow)
self.outWindow.setMaximumHeight(275)
############################################################################################
self.loadPreset() ## Loads show checkbox state on startup
self.setLayout(layout) # add main layout itself to this dialog
#########################################################################
################### Functions ###########################################
#########################################################################
################### Save/Load Preset Functions ##########################
def savePreset(self): ## Save Button Function
for x,y in self.cbButtonList.iteritems():
x = str(x)
self.getState[x] = y.isChecked()
if self.showRadio.isChecked() == True:
if not os.path.exists(getShowInfo.jobPath):
os.makedirs(getShowInfo.jobPath)
with open('%s/utilToolbox.yml' % getShowInfo.jobPath, 'w') as outfile:
outfile.write(yaml.dump(self.getState, default_flow_style=False))
showConfig = "<font color=yellow>Saved SHOW preset.</font>"
self.outWindow.setText(showConfig)
elif self.seqRadio.isChecked() == True:
if not os.path.exists(getShowInfo.seqPath):
os.makedirs(getShowInfo.seqPath)
with open('%s/utilToolbox.yml' % getShowInfo.seqPath, 'w') as outfile:
outfile.write(yaml.dump(self.getState, default_flow_style=False))
seqConfig = "<font color=yellow>Saved SEQ preset.</font>"
self.outWindow.setText(seqConfig)
elif self.shotRadio.isChecked() == True:
if not os.path.exists(getShowInfo.shotPath):
os.makedirs(getShowInfo.shotPath)
with open('%s/utilToolbox.yml' % getShowInfo.shotPath, 'w') as outfile:
outfile.write(yaml.dump(self.getState, default_flow_style=False))
shotConfig = "<font color=yellow>Saved SHOT preset.</font>"
self.outWindow.setText(shotConfig)
elif self.personalRadio.isChecked() == True:
if not os.path.exists(getShowInfo.personalPath):
os.makedirs(getShowInfo.personalPath)
with open('%s/utilToolbox.yml' % getShowInfo.personalPath, 'w') as outfile:
outfile.write(yaml.dump(self.getState, default_flow_style=False))
personalConfig = "<font color=yellow>Saved PERSONAL preset.</font>"
self.outWindow.setText(personalConfig)
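## the preset written above is a flat yaml mapping of checkbox label -> bool, e.g. (illustrative values):
## Red: true
## Shadow_Catcher: false
## loadPreset() below reads the same mapping back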
############### Creates Checkboxes in the User Frame ##############################
def createUserCheckboxes(self):
for x,y in self.userListCheckBox.iteritems():
y.setParent(None)
self.userInSettings = {}
self.userListCheckBox = {}
if self.showRadio2.isChecked() == True:
whichPath = getShowInfo.jobPath
elif self.seqRadio2.isChecked() == True:
whichPath = getShowInfo.seqPath
elif self.shotRadio2.isChecked() == True:
whichPath = getShowInfo.shotPath
elif self.personalRadio2.isChecked() == True:
whichPath = getShowInfo.personalPath
try:
self.userInfile = open('%s/userExport.yml' % whichPath)
self.userInSettings = yaml.load(self.userInfile)
self.userInfile.close()
for x,y in self.userInSettings.iteritems():
cb = UtilCreateCheckBox(x, x, self.user_frame)
self.userListCheckBox[x] = cb.buttonVarName
setattr(self, x, cb)
self.cbButtonList.update(self.userListCheckBox)
except: pass
################## Load CheckState Function -- Called when top radio button changes ####################################################
def loadPreset(self): ## Load Button Function
getShowInfo()
try:
if self.showRadio.isChecked() == True:
infile = open('%s/utilToolbox.yml' % getShowInfo.jobPath)
showConfig = "<font color=yellow>Loaded SHOW preset.</font>"
self.outWindow.setText(showConfig)
elif self.seqRadio.isChecked() == True:
infile = open('%s/utilToolbox.yml' % getShowInfo.seqPath)
seqConfig = "<font color=yellow>Loaded SEQ preset.</font>"
self.outWindow.setText(seqConfig)
elif self.shotRadio.isChecked() == True:
infile = open('%s/utilToolbox.yml' % getShowInfo.shotPath)
shotConfig = "<font color=yellow>Loaded SHOT preset.</font>"
self.outWindow.setText(shotConfig)
elif self.personalRadio.isChecked() == True:
infile = open('%s/utilToolbox.yml' % getShowInfo.personalPath)
personalConfig = "<font color=yellow>Loaded PERSONAL preset.</font>"
self.outWindow.setText(personalConfig)
inSettings = yaml.load(infile)
infile.close()
for x in self.cbButtonList:
if x in inSettings.keys():
if inSettings[x]:
try:
exec('self.%s.buttonVarName.setCheckState(QtCore.Qt.Checked)' % (x))
except: pass
else:
try:
exec('self.%s.buttonVarName.setCheckState(QtCore.Qt.Unchecked)' % (x))
except: pass
except:
noConfig = "<font color=red>NO CONFIG FILE EXISTS</font>"
self.outWindow.setText(noConfig)
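## a getattr-based alternative sketch for the exec() calls above (assumption: each checkbox object
## is stored as an attribute whose name matches its key in cbButtonList, as setattr() does in createLayout):
# cb = getattr(self, x, None)
# if cb is not None:
#     cb.buttonVarName.setChecked(inSettings[x])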
############# Export Button Function ###############################################################
def exportNetwork(self):
if bool(self.userInSettings):
self.exportShadDict = self.userInSettings
exportText = self.exportWindow.text()
self.userList2 = []
self.userList2.append(exportText)
if len(exportText) > 0 :
for x in self.userList2:
if x not in self.userListCheckBox:
cb = UtilCreateCheckBox(x, x, self.user_frame)
self.userListCheckBox[x] = cb.buttonVarName
setattr(self, x, cb)
self.cbButtonList.update(self.userListCheckBox)
getShowInfo()
if self.showRadio2.isChecked() == True:
self.exportShadDict[exportText] = '%s/%s.mb' % (getShowInfo.jobPath,exportText)
if not os.path.exists(getShowInfo.jobPath):
os.makedirs(getShowInfo.jobPath)
exportedFile = cmds.file('%s/%s.mb' % (getShowInfo.jobPath,exportText), es=True, typ="mayaBinary")
with open('%s/userExport.yml' % (getShowInfo.jobPath), 'w') as outfile:
outfile.write(yaml.dump(self.exportShadDict, default_flow_style=False))
showConfig = "<font color=yellow>Saved SHOW network '%s'.</font>" % exportText
self.outWindow.setText(showConfig)
self.exportShadDict = {}
elif self.seqRadio2.isChecked() == True:
self.exportShadDict[exportText] = '%s/%s.mb' % (getShowInfo.seqPath,exportText)
if not os.path.exists(getShowInfo.seqPath):
os.makedirs(getShowInfo.seqPath)
exportedFile = cmds.file('%s/%s.mb' % (getShowInfo.seqPath,exportText), es=True, typ="mayaBinary")
with open('%s/userExport.yml' % (getShowInfo.seqPath), 'w') as outfile:
outfile.write(yaml.dump(self.exportShadDict, default_flow_style=False))
seqConfig = "<font color=yellow>Saved SEQ network '%s'.</font>" % exportText
self.outWindow.setText(seqConfig)
self.exportShadDict = {}
elif self.shotRadio2.isChecked() == True:
self.exportShadDict[exportText] = '%s/%s.mb' % (getShowInfo.shotPath,exportText)
if not os.path.exists(getShowInfo.shotPath):
os.makedirs(getShowInfo.shotPath)
exportedFile = cmds.file('%s/%s.mb' % (getShowInfo.shotPath,exportText), es=True, typ="mayaBinary")
with open('%s/userExport.yml' % (getShowInfo.shotPath), 'w') as outfile:
outfile.write(yaml.dump(self.exportShadDict, default_flow_style=False))
shotConfig2 = "<font color=yellow>Saved SHOT network '%s'.</font>" % exportText
self.outWindow.setText(shotConfig2)
self.exportShadDict = {}
elif self.personalRadio2.isChecked() == True:
self.exportShadDict[exportText] = '%s/%s.mb' % (getShowInfo.personalPath,exportText)
if not os.path.exists(getShowInfo.personalPath):
os.makedirs(getShowInfo.personalPath)
exportedFile = cmds.file('%s/%s.mb' % (getShowInfo.personalPath,exportText), es=True, typ="mayaBinary")
with open('%s/userExport.yml' % (getShowInfo.personalPath), 'w') as outfile:
outfile.write(yaml.dump(self.exportShadDict, default_flow_style=False))
#print exportShadDict
personalConfig2 = "<font color=yellow>Saved PERSONAL network '%s'.</font>" % exportText
self.outWindow.setText(personalConfig2)
self.exportShadDict = {}
else:
noTextName = "<font color=red>Network needs to be named before it can be exported</font>"
self.outWindow.setText(noTextName)
################# Import Button Function #################################################################
def importButtonFunction(self): ## Import Button Function
#### sets to default layer and enables VRay if not already enabled #######
cmds.editRenderLayerGlobals(currentRenderLayer='defaultRenderLayer')
if cmds.pluginInfo('vrayformaya', q=True, loaded=True) == False:
cmds.loadPlugin('vrayformaya', qt=True)
output = []
warningSphere = []
warningPlate = []
getShotCamInfo()
for x,y in self.cbButtonList.iteritems():
if x == "Red" and y.isChecked() == True:
CreateRGBLightMaterials('RED',1,0,0)
output.append("Created Red VRay Light Material")
if x == "Green" and y.isChecked() == True:
CreateRGBLightMaterials('GREEN',0,1,0)
output.append("Created Green VRay Light Material")
if x == "Blue" and y.isChecked() == True:
CreateRGBLightMaterials('BLUE',0,0,1)
output.append("Created Blue VRay Light Material")
if x == "White" and y.isChecked() == True:
CreateRGBLightMaterials('WHITE',1,1,1)
output.append("Created White VRay Light Material")
if x == "Black" and y.isChecked() == True:
CreateRGBLightMaterials('BLACK',0,0,0)
output.append("Created Black VRay Light Material")
if x == "Shadow" and y.isChecked() == True:
CreateRenderElements('shadow')
output.append("Created Matte Shadow Render Element")
if x == "Contact_Shadow" and y.isChecked() == True:
CreateCatchers('contact_shadow')
CreateRenderElements('contactShadow')
output.append("Created Contact Shadow Render Element")
output.append("Created Conatct Shadow VRay Dirt Texture")
if x == "Reflection_Occ" and y.isChecked() == True:
CreateCatchers('reflection')
CreateRenderElements('refl_occ')
output.append("Created Reflection Occlusion VRay Dirt Texture")
output.append("Created Refleection Occlusion Render Element")
if x == "Fresnel" and y.isChecked() == True:
CreateRenderElements('fresnel')
output.append("Created VRay Frensel Utility")
output.append("Created Fresnel Render Element")
if x == "Shadow_Catcher" and y.isChecked() == True:
CreateCatchers('shadow')
output.append("Created Shadow Catcher Vray Mtl")
if x == "UV" and y.isChecked() == True:
CreateUV()
output.append("Created UV pass Render Element")
if x == "Plate_Projection" and y.isChecked() == True:
PlateProject()
output.append("Created Plate Projection Shader")
if not cmds.objExists(getShotCamInfo.shotCam):
warningPlate.append("Could not link plate projection node to shotcam. Shotcam does not exist. Import the shotcam and run the tool again with this selection to fix the camera attachments.")
if x == "Reflection_Catcher" and y.isChecked() == True:
CreateCatchers('reflection')
output.append("Created Reflection Catcher Vray Mtl")
if x == "Ref_Spheres" and y.isChecked() == True:
CreateRefSphere()
output.append("Created Reference Spheres and Color Chart")
if not cmds.objExists(getShotCamInfo.shotCam):
print 'could not find the shotcam'
warningSphere.append("Could not position and constrain ref spheres to the shotcam. Shotcam does not exist. Import the shotcam and run the tool again with this selection to fix the camera attachments.")
for x,y in self.userListCheckBox.iteritems():
if y.isChecked() == True:
cmds.file('%s' % (self.userInSettings[x]), i=True, ns='%s' % x)
output.append("Loaded '%s'" % x)
############# Output Statements #################################
conformOutput = '\n'.join(output) ## reformats output list
conformSphereWarn = '\n'.join(warningSphere) ## reformats output list
conformPlateWarn = '\n'.join(warningPlate) ## reformats output list
warningSphereOut = "<font color=red>" + conformSphereWarn + "</font>" ## turn that string red
warningPlateOut = "<font color=red>" + conformPlateWarn + "</font>" ## turn that string red
self.outWindow.setText(conformOutput) ## prints output in output box
self.outWindow.append(warningSphereOut) ## prints warnings in output box
self.outWindow.append(warningPlateOut) ## prints warnings in output box
############# Un/Check All Functions ######################
def checkAllFunction(self): ## Check All Button Function
for x,y in self.cbButtonList.iteritems():
y.setChecked(True)
def checkNoneFunction(self): ## Check None Button Function
for x,y in self.cbButtonList.iteritems():
y.setChecked(False)
################### Delete User Network Button -- Delete Button Function #############################################
def deleteUser(self):
if self.showRadio2.isChecked() == True:
whichPath = getShowInfo.jobPath
elif self.seqRadio2.isChecked() == True:
whichPath = getShowInfo.seqPath
elif self.shotRadio2.isChecked() == True:
whichPath = getShowInfo.shotPath
elif self.personalRadio2.isChecked() == True:
whichPath = getShowInfo.personalPath
for x,y in self.userListCheckBox.iteritems():
if y.isChecked() == True:
os.remove(self.userInSettings[x])
y.setParent(None)
del self.userInSettings[x]
with open('%s/userExport.yml' % (whichPath), 'w') as outfile:
outfile.write(yaml.dump(self.userInSettings, default_flow_style=False))
########## Pop Up Window Launcher #####################################################################
def youSure(self):
whichBoxes = []
for x,y in self.userListCheckBox.iteritems():
if y.isChecked() == True:
newX=str(x)
whichBoxes.append(newX)
#whichBoxes = ''.join(whichBoxes)
msgBox = QtGui.QMessageBox()
msgBox.setText("Are you sure you want to remove %s???" % whichBoxes)
msgBox.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
ret = msgBox.exec_()
if ret == msgBox.Ok:
self.deleteUser()
################ Toggle each line Functions ###################################
def topToggle(self, event): ## Top list CB toggle
flipRow(self.topListCheckBox)
def midTopToggle(self, event): ## midTop list CB toggle
flipRow(self.midTopListCheckBox)
def midBotToggle(self, event): ## midBot list CB toggle
flipRow(self.midBotListCheckBox)
def bottomToggle(self, event): ## Bottom list CB toggle
flipRow(self.bottomListCheckBox)
def userToggle(self, event): ## User list CB toggle
flipRow(self.userListCheckBox)
def flipRow(whichList): ## toggle row of checkboxes if you click on the label
if not whichList: return ## guard: nothing to toggle when the row has no checkboxes yet
if whichList.values()[0].isChecked() == True:
for x,y in whichList.iteritems():
y.setChecked(False)
else:
for x,y in whichList.iteritems():
y.setChecked(True)
############ Get Show/Seq/Shot/User Information ###################################################
def getShowInfo():
tackOn = '/TECH/config/toolbox'
tackOnJob = '/TECH/lib/maya/config/toolbox'
tackOnPersonal = '/config/toolbox'
getShowInfo.shotPath = os.environ.get('M_SHOT_PATH') + tackOn
getShowInfo.seqPath = os.environ.get('M_SEQUENCE_PATH') + tackOn
getShowInfo.jobPath = os.environ.get('M_JOB_PATH') + tackOnJob
getShowInfo.personalPath = os.environ.get('HOME') + tackOnPersonal
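## note: the env vars above (M_SHOT_PATH, M_SEQUENCE_PATH, M_JOB_PATH, HOME) are assumed to be set by
## the show environment; os.environ.get() returns None for a missing var and the '+' would raise a
## TypeError. a defensive sketch for one of them:
# getShowInfo.personalPath = (os.environ.get('HOME') or os.path.expanduser('~')) + tackOnPersonal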
################## Get ShotCam ##################################################
def getShotCamInfo():
getShotCamInfo.shotCam = '' ## default so later objExists/truthiness checks don't hit an unset attribute
shotcamList = ['shotcam*:shot_camera']
getCamList = cmds.ls(shotcamList)
if cmds.objExists('shotcam1:shot_camera'):
getShotCamInfo.shotCam = cmds.ls('shotcam1:shot_camera')[0]
elif cmds.objExists('shotcam_alexaAna1:shot_camera'):
getShotCamInfo.shotCam = cmds.ls('shotcam_alexaAna1:shot_camera')[0]
elif cmds.objExists('shotcam_alexaHD1:shot_camera'):
getShotCamInfo.shotCam = cmds.ls('shotcam_alexaHD1:shot_camera')[0]
elif cmds.objExists('shotcam_5DMark3HD1:shot_camera'):
getShotCamInfo.shotCam = cmds.ls('shotcam_5DMark3HD1:shot_camera')[0]
elif cmds.objExists('shotcam_redDragonX1:shot_camera'):
getShotCamInfo.shotCam = cmds.ls('shotcam_redDragonX1:shot_camera')[0]
elif cmds.objExists('shotcam_redDragonBarge1:shot_camera'):
getShotCamInfo.shotCam = cmds.ls('shotcam_redDragonBarge1:shot_camera')[0]
elif cmds.objExists('shotcam_fg:shot_camera'):
getShotCamInfo.shotCam = cmds.ls('shotcam_fg:shot_camera')[0]
#if shotcamList > 1:
#if 'shotcam_fg:shot_camera' in getCamList:
#getCamList.remove('shotcam_fg:shot_camera')
#getShotCamInfo.shotCam = getCamList[0]
#print getShotCamInfo.shotCam
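## an equivalent loop sketch for the elif chain above (first existing namespace wins):
# for ns in ('shotcam1', 'shotcam_alexaAna1', 'shotcam_alexaHD1', 'shotcam_5DMark3HD1',
#            'shotcam_redDragonX1', 'shotcam_redDragonBarge1', 'shotcam_fg'):
#     if cmds.objExists('%s:shot_camera' % ns):
#         getShotCamInfo.shotCam = cmds.ls('%s:shot_camera' % ns)[0]
#         break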
getShotCamInfo()
################## Create Checkbox Class ########################################
class UtilCreateCheckBox(object):
def __init__(self, buttonVarName, buttonLabelName, frame):
self.buttonVarName = QtGui.QCheckBox(buttonLabelName)
frame.layout().addWidget(self.buttonVarName)
font = QtGui.QFont()
font.setPointSize(10)
self.buttonVarName.setFont(font)
self.buttonVarName.setChecked(True)
################# Create Frames Class ###########################################
class FrameLabel(object):
def __init__(self, frameLabelVarName, frameLabelName, frame):
self.frameLabelName = frameLabelName
self.frame = frame
self.frameLabelVarName = QtGui.QLabel(frameLabelName)
frame.layout().addWidget(self.frameLabelVarName)
font = QtGui.QFont()
font.setBold(True)
font.setPointSize(15)
self.frameLabelVarName.setFont(font)
########################################################################
########################################################################
########################################################################
############################### BACK END ###############################
########################################################################
########################################################################
########################################################################
class CreateRGBLightMaterials(object):
def __init__(self, shaderName, R, G, B):
self.shaderName = shaderName
shaderSGName = shaderName + "_SG"
self.R = R
self.G = G
self.B = B
if not cmds.objExists(shaderName):
mtlName = cmds.shadingNode('VRayLightMtl', asShader=True, name=shaderName)
cmds.setAttr('%s.color' % (mtlName), R,G,B, type='double3')
cmds.setAttr('%s.emitOnBackSide' % (mtlName), 1)
mtlSG = cmds.sets(name = shaderSGName, renderable=True,noSurfaceShader=True,empty=True)
cmds.connectAttr('%s.outColor' % (mtlName) ,'%s.surfaceShader' % (mtlSG))
else:
mtlName = cmds.ls(shaderName)[0]
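## example creation ##
## createRedLightMtl = CreateRGBLightMaterials('RED', 1, 0, 0)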
class CreateCatchers(object):
def __init__(self, type):
self.type = type
## type meaning 'shadow' or 'reflection' catcher
if type.lower() == 'shadow':
if not cmds.objExists('SHADOW_CATCHER'):
shdCatcher = cmds.shadingNode('VRayMtl', asShader=True, name='SHADOW_CATCHER')
shdCatcherSG = cmds.sets(name = 'SHADOW_CATCHER_SG', renderable=True,noSurfaceShader=True,empty=True)
cmds.connectAttr('%s.outColor' % (shdCatcher) ,'%s.surfaceShader' % (shdCatcherSG))
cmds.setAttr('%s.reflectionColorAmount' % (shdCatcher), 0)
cmds.setAttr('%s.diffuseColorAmount' % (shdCatcher), 1)
cmds.setAttr('%s.brdfType' % (shdCatcher), 0)
cmds.setAttr('%s.useFresnel' % (shdCatcher), 0)
## creates shadow catching VRayMtl
if type.lower() == 'contact_shadow':
if not cmds.objExists('CONTACT_SHADOW_CATCHER'):
contactShadCatcher = cmds.shadingNode('VRayDirt', asTexture=True, name='CONTACT_SHADOW_CATCHER')
cmds.setAttr('%s.blackColor' % (contactShadCatcher), 1,1,1, type='double3')
cmds.setAttr('%s.whiteColor' % (contactShadCatcher), 0,0,0, type='double3')
cmds.setAttr('%s.radius' % (contactShadCatcher), 10)
cmds.setAttr('%s.ignoreSelfOcclusion' % (contactShadCatcher), 1)
cmds.setAttr('%s.resultAffectInclusive' % (contactShadCatcher), 0)
## creates VrayDirt used for ambient occlusion
elif type.lower() == 'reflection':
if not cmds.objExists('REFL_CATCHER'):
mirrorMtl = cmds.shadingNode('VRayMtl', asShader=True, name='REFL_CATCHER')
mirrorMtlSG = cmds.sets(name = 'REFL_CATCHER_SG', renderable=True,noSurfaceShader=True,empty=True)
cmds.connectAttr('%s.outColor' % (mirrorMtl) ,'%s.surfaceShader' % (mirrorMtlSG))
cmds.setAttr('%s.color' % (mirrorMtl), 0,0,0, type='double3')
cmds.setAttr('%s.reflectionColor' % (mirrorMtl), 1,1,1, type='double3')
cmds.setAttr('%s.reflectionColorAmount' % (mirrorMtl), 1)
cmds.setAttr('%s.diffuseColorAmount' % (mirrorMtl), 0)
cmds.setAttr('%s.useFresnel' % (mirrorMtl), 0)
mirrorOccl = cmds.shadingNode('VRayDirt', asTexture=True, name='MIRROR_REFLOCC')
cmds.setAttr('%s.blackColor' % (mirrorOccl), 1,1,1, type='double3')
cmds.setAttr('%s.whiteColor' % (mirrorOccl), 0,0,0, type='double3')
cmds.setAttr('%s.radius' % (mirrorOccl), 1000)
cmds.setAttr('%s.occlusionMode' % (mirrorOccl), 2)
cmds.connectAttr('%s.outColor' % (mirrorOccl), '%s.reflectionColor' % (mirrorMtl))
cmds.connectAttr('%s.reflectionGlossiness' % (mirrorMtl), '%s.glossiness' % (mirrorOccl))
mkbrdfTypeOffset = cmds.shadingNode('plusMinusAverage', asUtility=True, name='brdfOffset')
cmds.connectAttr('%s.brdfType' % (mirrorMtl), '%s.input1D[0]' % (mkbrdfTypeOffset))
cmds.setAttr('%s.input1D[1]' % (mkbrdfTypeOffset), 1)
#cmds.connectAttr('%s.output1D' % (mkbrdfTypeOffset), '%s.occlusionMode' % (mirrorOccl))
cmds.expression(s='MIRROR_REFLOCC.occlusionMode = brdfOffset.output1D;', ae=True)
cmds.connectAttr('%s.reflectionSubdivs' % (mirrorMtl), '%s.subdivs' % (mirrorOccl))
## creates reflection catching VRayMtl and VRay dirt for a reflection occlusion (RO)
## example creation ##
## createReflectionCatcher = CreateCatchers('reflection')
class CreateRenderElements(object):
def __init__(self,type):
self.type = type
if type.lower() == 'shadow':
if not cmds.objExists('vrayRE_MatteShadow'):
vfb.matteShadow('vrayRE_MatteShadow', enabled=False)
## creates cast shadow render element
if type.lower() == 'contactshadow':
if not cmds.objExists('vrayRE_ContactShadow'):
if cmds.objExists('CONTACT_SHADOW_CATCHER'):
vfb.extraTex('vrayRE_ContactShadow', 'CONTACT_SHADOW_CATCHER', explicit_channel='contactShadow', enabled=False)
## creates contact shadow render element
if type.lower() == 'fresnel':
if not cmds.objExists('vrayRE_Fresnel'):
createFresnel = cmds.shadingNode('VRayFresnel', asTexture=True, name='VrayFresnel')
createFresnelTwoD = cmds.shadingNode('place2dTexture', asUtility=True, name='place2dFresnel')
cmds.connectAttr('%s.outUV' % (createFresnelTwoD), '%s.uvCoord' % (createFresnel))
cmds.connectAttr('%s.outUvFilterSize' % (createFresnelTwoD), '%s.uvFilterSize' % (createFresnel))
vfb.extraTex('vrayRE_Fresnel', 'VrayFresnel', explicit_channel='fresnel', enabled=False)
## creates fresnel render element
if type.lower() == 'refl_occ':
if not cmds.objExists('vrayRE_reflectionOcclusion'):
if cmds.objExists('MIRROR_REFLOCC'):
vfb.extraTex('vrayRE_reflectionOcclusion', 'MIRROR_REFLOCC', explicit_channel='reflectionOcclusion', enabled=False)
## creates reflection occlusion render element
## example creation ##
## createShadowRE = CreateRenderElements('contactShadow')
class PlateProject(object):
def __init__(self):
getShotCamInfo()
projectCam = getShotCamInfo.shotCam
if not cmds.objExists('plateProject'):
projShader = cmds.shadingNode('VRayMtl', asShader=True, name='plateProject')
projShaderSG = cmds.sets(name = 'plateProject_SG', renderable=True,noSurfaceShader=True,empty=True)
cmds.connectAttr('%s.outColor' % (projShader) ,'%s.surfaceShader' % (projShaderSG))
cmds.setAttr('%s.useFresnel' % (projShader), 0)
cmds.setAttr('%s.diffuseColorAmount' % (projShader), 0)
## creates shader
plateTexture = cmds.shadingNode('file', asTexture=True, name='plateTexture')
cmds.setAttr('%s.defaultColor' % (plateTexture), 0,0,0, type='double3')
cmds.setAttr('%s.useFrameExtension' % (plateTexture), 1)
## creates texture node
fileProject = cmds.shadingNode('projection', asTexture=True, name='projectNodePlate')
cmds.setAttr('%s.projType' % (fileProject), 8)
cmds.setAttr('%s.fitType' % (fileProject), 1)
cmds.setAttr('%s.fitFill' % (fileProject), 0)
cmds.setAttr('%s.alphaOffset' % (fileProject), 1)
cmds.setAttr('%s.defaultColor' % (fileProject), 0,0,0, type='double3')
## creates projection node
twoD = cmds.shadingNode('place2dTexture', asUtility=True, name='PlatePlace2d')
cmds.setAttr('%s.wrapU' % (twoD), 0)
cmds.setAttr('%s.wrapV' % (twoD), 0)
## creates place2D for plate texture
threeD = cmds.shadingNode('place3dTexture', asUtility=True, name='PlatePlace3d')
## creates place3D for camera
cmds.connectAttr('%s.outColor' % (fileProject), '%s.illumColor' % (projShader))
cmds.connectAttr('%s.outColor' % (plateTexture), '%s.image' % (fileProject))
cmds.connectAttr('%s.worldInverseMatrix' % (threeD), '%s.placementMatrix' % (fileProject))
cmds.connectAttr('%s.outAlpha' % (fileProject), '%s.opacityMapR' % (projShader))
cmds.connectAttr('%s.outAlpha' % (fileProject), '%s.opacityMapG' % (projShader))
cmds.connectAttr('%s.outAlpha' % (fileProject), '%s.opacityMapB' % (projShader))
## connects texture, alpha, shader, projection, and 3D placement
place2DConnections = ('coverage', 'translateFrame', 'rotateFrame', 'mirrorU', 'mirrorV', 'stagger', 'wrapU', 'wrapV', 'repeatUV',
'offset', 'rotateUV', 'noiseUV', 'vertexUvOne', 'vertexUvTwo', 'vertexUvThree', 'vertexCameraOne')
for x in place2DConnections:
cmds.connectAttr('%s.%s' % (twoD, x), '%s.%s' % (plateTexture, x))
cmds.connectAttr('%s.outUV' % (twoD), '%s.uv' % (plateTexture))
cmds.connectAttr('%s.outUvFilterSize' % (twoD), '%s.uvFilterSize' % (plateTexture))
## connects place2D for plate texture
if cmds.objExists(projectCam):
fileProject = cmds.ls('projectNodePlate')[0]
getIfConnect = cmds.listConnections('%s.linkedCamera' % (fileProject), d=False, s=True)
if getIfConnect == None:
cmds.connectAttr('%s' % (projectCam) + 'Shape.message', '%s.linkedCamera' % (fileProject), f=True)
## connects shotcam to the proj cam if it exists
## example creation ##
## createPlateProject = PlateProject()
class CreateRefSphere(object):
def __init__(self):
if not cmds.objExists('greyBallShader'):
diffShader = cmds.shadingNode('VRayMtl', asShader=True, name='greyBallShader')
diffShaderSG = cmds.sets(name = 'greyBallSG', renderable=True,noSurfaceShader=True,empty=True)
cmds.connectAttr('%s.outColor' % (diffShader) ,'%s.surfaceShader' % (diffShaderSG))
cmds.setAttr('%s.useFresnel' % (diffShader), 0)
cmds.setAttr('%s.color' % (diffShader), 0.18,0.18,0.18, type='double3')
## creates and assigns grey ball shader
if not cmds.objExists('greyBall'):
diffBall = cmds.polySphere(name='greyBall', r=2.5)
cmds.setAttr('%s.translateY' % (diffBall[0]), 6)
cmds.delete(diffBall, ch=True)
## creates grey ball geo
cmds.sets(diffBall[0], e=True, forceElement='greyBallSG')
## assigns grey ball shader to geo
if not cmds.objExists('chromeBallShader'):
refShader = cmds.shadingNode('VRayMtl', asShader=True, name='chromeBallShader')
refShaderSG = cmds.sets(name = 'chromeBallSG', renderable=True,noSurfaceShader=True,empty=True)
cmds.connectAttr('%s.outColor' % (refShader) ,'%s.surfaceShader' % (refShaderSG))
cmds.setAttr('%s.useFresnel' % (refShader), 0)
cmds.setAttr('%s.color' % (refShader), 0, 0, 0, type='double3')
cmds.setAttr('%s.reflectionColor' % (refShader), 1, 1, 1, type='double3')
cmds.setAttr('%s.diffuseColorAmount' % (refShader), 0)
cmds.setAttr('%s.reflectionsMaxDepth' % (refShader), 2)
## creates chrome ball shader
if not cmds.objExists('chromeBall'):
refBall = cmds.polySphere(name='chromeBall', r=2.5)
cmds.setAttr('%s.translate' % (refBall[0]), 7,6,0)
cmds.delete(refBall, ch=True)
## creates chrome ball geo
cmds.sets(refBall[0], e=True, forceElement='chromeBallSG')
## assigns chrome ball shader to geo
colorChartTexturePath = '/jobs/asset_library/sequences/assets/common/pub/hdr_library/ColorChecker_linear_from_Avg_16bit.exr'
## color chart texture path
if not cmds.objExists('colorChartShader'):
chartShader = cmds.shadingNode('VRayLightMtl', asShader=True, name='colorChartShader')
chartShaderSG = cmds.sets(name = 'chartShaderSG', renderable=True,noSurfaceShader=True,empty=True)
cmds.connectAttr('%s.outColor' % (chartShader) ,'%s.surfaceShader' % (chartShaderSG))
cmds.setAttr('%s.emitOnBackSide' % (chartShader), 1)
## creates color chart VrayLightMtl
if not cmds.objExists('colorChart'):
colorChart = cmds.polyPlane(name='colorChart', h=5,w=5,sx=1,sy=1)
#cmds.setAttr('%s.translate' % (colorChart[0]), 0,6,0)
cmds.setAttr('%s.rotateX' % (colorChart[0]), 90)
## creates color chart geo
cmds.sets(colorChart[0], e=True, forceElement='chartShaderSG')
## assigns shader
if not cmds.objExists('chartTexture'):
chartTexture = cmds.shadingNode('file', asTexture=True, name='chartTexture')
chartTwoD = cmds.shadingNode('place2dTexture', asUtility=True, name='chartPlace2d')
chart2DConnections = ('coverage', 'translateFrame', 'rotateFrame', 'mirrorU', 'mirrorV', 'stagger', 'wrapU', 'wrapV', 'repeatUV',
'offset', 'rotateUV', 'noiseUV', 'vertexUvOne', 'vertexUvTwo', 'vertexUvThree', 'vertexCameraOne')
for x in chart2DConnections:
cmds.connectAttr('%s.%s' % (chartTwoD, x), '%s.%s' % (chartTexture, x))
cmds.connectAttr('%s.outUV' % (chartTwoD), '%s.uv' % (chartTexture))
cmds.connectAttr('%s.outUvFilterSize' % (chartTwoD), '%s.uvFilterSize' % (chartTexture))
cmds.connectAttr('%s.outColor' % (chartTexture), '%s.color' % (chartShader))
## creates and connects file texture node
cmds.setAttr('%s.fileTextureName' % (chartTexture), colorChartTexturePath, type='string')
############ So dumb but I can't get the file texture path to fully eval without selecting the file node ###################
cmds.select(chartTexture, r=True)
## feeds in color chart texture path
if not cmds.objExists('RefSphere_GRP'):
refSetupGroupName = 'RefSphere_GRP'
refSetupTransGroup = 'TranslateThis'
refSetupGroupMembers = (colorChart[0], refBall[0], diffBall[0])
translateGroup = cmds.group(refSetupGroupMembers, name=refSetupTransGroup)
refSetupGroup = cmds.group(translateGroup, name=refSetupGroupName)
getShotCamInfo()
if getShotCamInfo.shotCam:
if cmds.objExists(getShotCamInfo.shotCam):
refSetupGroup = cmds.ls('RefSphere_GRP')[0]
translateGroup = cmds.ls('TranslateThis')[0]
getIfConnect = cmds.listConnections('%s.tx' % (refSetupGroup), d=False, s=True)
if getIfConnect == None:
cmds.parentConstraint(getShotCamInfo.shotCam, refSetupGroup, mo=False)
cmds.setAttr('%s.translate' % (translateGroup), 0, 0, -100)
## creates groups and constrains to camera
## example creation ##
## createRefSpheres = CreateRefSphere()
class CreateUV(object):
def __init__(self):
if not cmds.objExists('vrayRE_UV'):
uvSampler = cmds.shadingNode('samplerInfo', asUtility=True, name='UVsampler')
uvExtraTex = vfb.extraTex('vrayRE_UV', uvSampler, explicit_channel='UV', enabled=False)
cmds.connectAttr('%s.uCoord' % uvSampler, 'vrayRE_UV.vray_texture_extratexR')
cmds.connectAttr('%s.vCoord' % uvSampler, 'vrayRE_UV.vray_texture_extratexG')
## example creation ##
## createUVPass = CreateUV()
def launchUI():
global vrayToolBoxUtil
# will try and close the ui if it exists
try: vrayToolBoxUtil.close()
except: pass
vrayToolBoxUtil = UtilityToolBoxUI()
vrayToolBoxUtil.show()
vrayToolBoxUtil.raise_()
################## Show Window #######################################
if __name__ == "__main__":
launchUI()
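## usage sketch from the Maya script editor (module name is illustrative -- assumes this file is
## saved on a PYTHONPATH location as vrayToolboxUtility.py):
## import vrayToolboxUtility
## vrayToolboxUtility.launchUI()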
|
[step trace: the code above followed by progressively tokenized variants; truncated in the source]
self.userInSettings[x] \n with open('%s/userExport.yml' % (whichPath), 'w') as outfile:\n outfile.write(yaml.dump(self.userInSettings, default_flow_style=False))\n \n########## Pop Up Window Launcher #####################################################################\n\n def youSure(self):\n \n whichBoxes = []\n for x,y in self.userListCheckBox.iteritems():\n if y.isChecked() == True:\n newX=str(x)\n whichBoxes.append(newX)\n #whichBoxes = ''.join(whichBoxes)\n \n msgBox = QtGui.QMessageBox()\n msgBox.setText(\"Are you sure you want to remove %s???\" % whichBoxes)\n msgBox.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)\n ret = msgBox.exec_()\n\n if ret == msgBox.Ok:\n self.deleteUser()\n \n################ Toggle each line Functions ###################################\n \n def topToggle(self, event): ## Top list CB toggle\n flipRow(self.topListCheckBox) \n\n def midTopToggle(self, event): ## midTop list CB toggle\n flipRow(self.midTopListCheckBox) \n \n def midBotToggle(self, event): ## midBot list CB toggle\n flipRow(self.midBotListCheckBox) \n\n def bottomToggle(self, event): ## Bottom list CB toggle\n flipRow(self.bottomListCheckBox)\n \n def userToggle(self, event): ## User list CB toggle\n flipRow(self.userListCheckBox)\n\ndef flipRow(whichList): ## toggle row of checkboxes if you click on the label\n if whichList.values()[0].isChecked() == True:\n for x,y in whichList.iteritems():\n y.setChecked(False)\n else:\n for x,y in whichList.iteritems():\n y.setChecked(True)\n \n############ Get Show/Seq/Shot/User Information ################################################### \n \ndef getShowInfo():\n\n tackOn = '/TECH/config/toolbox'\n tackOnJob = '/TECH/lib/maya/config/toolbox'\n\n tackOnPersonal = '/config/toolbox'\n \n getShowInfo.shotPath = os.environ.get('M_SHOT_PATH') + tackOn\n getShowInfo.seqPath = os.environ.get('M_SEQUENCE_PATH') + tackOn\n getShowInfo.jobPath = os.environ.get('M_JOB_PATH') + tackOnJob\n getShowInfo.personalPath = os.environ.get('HOME') + tackOnPersonal\n \n################## Get ShotCam ##################################################\ndef getShotCamInfo():\n \n shotcamList = ['shotcam*:shot_camera']\n getCamList = cmds.ls(shotcamList)\n \n if cmds.objExists('shotcam1:shot_camera'):\n getShotCamInfo.shotCam = cmds.ls('shotcam1:shot_camera')[0]\n elif cmds.objExists('shotcam_alexaAna1:shot_camera'):\n getShotCamInfo.shotCam = cmds.ls('shotcam_alexaAna1:shot_camera')[0]\n elif cmds.objExists('shotcam_alexaHD1:shot_camera'):\n getShotCamInfo.shotCam = cmds.ls('shotcam_alexaHD1:shot_camera')[0]\n elif cmds.objExists('shotcam_5DMark3HD1:shot_camera'):\n getShotCamInfo.shotCam = cmds.ls('shotcam_5DMark3HD1:shot_camera')[0] \n elif cmds.objExists('shotcam_redDragonX1:shot_camera'):\n getShotCamInfo.shotCam = cmds.ls('shotcam_redDragonX1:shot_camera')[0]\n elif cmds.objExists('shotcam_redDragonBarge1:shot_camera'):\n getShotCamInfo.shotCam = cmds.ls('shotcam_redDragonBarge1:shot_camera')[0] \n elif cmds.objExists('shotcam_fg:shot_camera'):\n getShotCamInfo.shotCam = cmds.ls('shotcam_fg:shot_camera')[0]\n \n \n \n\n #if shotcamList > 1:\n #if 'shotcam_fg:shot_camera' in getCamList:\n #getCamList.remove('shotcam_fg:shot_camera')\n\n #getShotCamInfo.shotCam = getCamList[0]\n #print getShotCamInfo.shotCam\n \ngetShotCamInfo()\n\n \n################## Create Checkbox Class ########################################\n \nclass UtilCreateCheckBox(object):\n def __init__(self, buttonVarName, buttonLabelName, frame):\n \n self.buttonVarName = 
QtGui.QCheckBox(buttonLabelName)\n frame.layout().addWidget(self.buttonVarName)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.buttonVarName.setFont(font)\n self.buttonVarName.setChecked(True)\n\n################# Create Frames Class ###########################################\n \nclass FrameLabel(object):\n def __init__(self, frameLabelVarName, frameLabelName, frame):\n \n self.frameLabelName = frameLabelName\n self.frame = frame\n \n self.frameLabelVarName = QtGui.QLabel(frameLabelName)\n frame.layout().addWidget(self.frameLabelVarName)\n font = QtGui.QFont()\n font.setBold(True)\n font.setPointSize(15)\n self.frameLabelVarName.setFont(font)\n \n \n########################################################################\n########################################################################\n########################################################################\n############################### BACK END ###############################\n########################################################################\n########################################################################\n########################################################################\n\n\nclass CreateRGBLightMaterials(object): \n def __init__(self, shaderName, R, G, B):\n \n self.shaderName = shaderName\n shaderSGName = shaderName + \"_SG\"\n self.R = R\n self.G = G\n self.B = B\n \n if not cmds.objExists(shaderName):\n mtlName = cmds.shadingNode('VRayLightMtl', asShader=True, name=shaderName)\n cmds.setAttr('%s.color' % (mtlName), R,G,B, type='double3')\n cmds.setAttr('%s.emitOnBackSide' % (mtlName), 1)\n mtlSG = cmds.sets(name = shaderSGName, renderable=True,noSurfaceShader=True,empty=True)\n cmds.connectAttr('%s.outColor' % (mtlName) ,'%s.surfaceShader' % (mtlSG))\n \n else:\n mtlName = cmds.ls(shaderName)[0]\n\nclass CreateCatchers(object): \n def __init__(self, type):\n self.type = type\n ## type meaning 'shadow' or 'reflection' catcher\n \n if type.lower() == 'shadow':\n if not cmds.objExists('SHADOW_CATCHER'):\n shdCatcher = cmds.shadingNode('VRayMtl', asShader=True, name='SHADOW_CATCHER') \n shdCatcherSG = cmds.sets(name = 'SHADOW_CATCHER_SG', renderable=True,noSurfaceShader=True,empty=True)\n cmds.connectAttr('%s.outColor' % (shdCatcher) ,'%s.surfaceShader' % (shdCatcherSG))\n cmds.setAttr('%s.reflectionColorAmount' % (shdCatcher), 0)\n cmds.setAttr('%s.diffuseColorAmount' % (shdCatcher), 1)\n cmds.setAttr('%s.brdfType' % (shdCatcher), 0)\n cmds.setAttr('%s.useFresnel' % (shdCatcher), 0)\n ## creates shadow catching VRayMtl\n \n if type.lower() == 'contact_shadow': \n if not cmds.objExists('CONTACT_SHADOW_CATCHER'):\n contactShadCatcher = cmds.shadingNode('VRayDirt', asTexture=True, name='CONTACT_SHADOW_CATCHER') \n cmds.setAttr('%s.blackColor' % (contactShadCatcher), 1,1,1, type='double3')\n cmds.setAttr('%s.whiteColor' % (contactShadCatcher), 0,0,0, type='double3')\n cmds.setAttr('%s.radius' % (contactShadCatcher), 10)\n cmds.setAttr('%s.ignoreSelfOcclusion' % (contactShadCatcher), 1)\n cmds.setAttr('%s.resultAffectInclusive' % (contactShadCatcher), 0)\n ## creates VrayDirt used for ambient occlusion\n \n elif type.lower() == 'reflection':\n if not cmds.objExists('REFL_CATCHER'):\n mirrorMtl = cmds.shadingNode('VRayMtl', asShader=True, name='REFL_CATCHER') \n mirrorMtlSG = cmds.sets(name = 'REFL_CATCHER_SG', renderable=True,noSurfaceShader=True,empty=True)\n cmds.connectAttr('%s.outColor' % (mirrorMtl) ,'%s.surfaceShader' % (mirrorMtlSG)) \n cmds.setAttr('%s.color' % (mirrorMtl), 0,0,0, 
type='double3')\n cmds.setAttr('%s.reflectionColor' % (mirrorMtl), 1,1,1, type='double3')\n cmds.setAttr('%s.reflectionColorAmount' % (mirrorMtl), 1)\n cmds.setAttr('%s.diffuseColorAmount' % (mirrorMtl), 0)\n cmds.setAttr('%s.useFresnel' % (mirrorMtl), 0)\n mirrorOccl = cmds.shadingNode('VRayDirt', asTexture=True, name='MIRROR_REFLOCC')\n cmds.setAttr('%s.blackColor' % (mirrorOccl), 1,1,1, type='double3')\n cmds.setAttr('%s.whiteColor' % (mirrorOccl), 0,0,0, type='double3')\n cmds.setAttr('%s.radius' % (mirrorOccl), 1000)\n cmds.setAttr('%s.occlusionMode' % (mirrorOccl), 2)\n cmds.connectAttr('%s.outColor' % (mirrorOccl), '%s.reflectionColor' % (mirrorMtl))\n cmds.connectAttr('%s.reflectionGlossiness' % (mirrorMtl), '%s.glossiness' % (mirrorOccl)) \n mkbrdfTypeOffset = cmds.shadingNode('plusMinusAverage', asUtility=True, name='brdfOffset')\n cmds.connectAttr('%s.brdfType' % (mirrorMtl), '%s.input1D[0]' % (mkbrdfTypeOffset))\n cmds.setAttr('%s.input1D[1]' % (mkbrdfTypeOffset), 1)\n #cmds.connectAttr('%s.output1D' % (mkbrdfTypeOffset), '%s.occlusionMode' % (mirrorOccl))\n cmds.expression(s='MIRROR_REFLOCC.occlusionMode = brdfOffset.output1D;', ae=True)\n cmds.connectAttr('%s.reflectionSubdivs' % (mirrorMtl), '%s.subdivs' % (mirrorOccl))\n ## creates relfection catching VrayMtl and VRay dirt for an RO\n \n MIRROR_REFLOCC.occlusionMode = brdfOffset.output1D;\n\n## example creation ## \n## createReflectionCatcher = CreateCatchers('reflection') \n\nclass CreateRenderElements(object):\n def __init__(self,type):\n \n self.type = type\n \n if type.lower() == 'shadow':\n if not cmds.objExists('vrayRE_MatteShadow'):\n vfb.matteShadow('vrayRE_MatteShadow', enabled=False)\n ## creates cast shadow render element\n \n if type.lower() == 'contactshadow':\n if not cmds.objExists('vrayRE_ContactShadow'):\n if cmds.objExists('CONTACT_SHADOW_CATCHER'):\n vfb.extraTex('vrayRE_ContactShadow', 'CONTACT_SHADOW_CATCHER', explicit_channel='contactShadow', enabled=False)\n ## creates contact shadow render element\n \n if type.lower() == 'fresnel': \n if not cmds.objExists('vrayRE_Fresnel'):\n createFresnel = cmds.shadingNode('VRayFresnel', asTexture=True, name='VrayFresnel')\n createFresnelTwoD = cmds.shadingNode('place2dTexture', asUtility=True, name='place2dFresnel')\n cmds.connectAttr('%s.outUV' % (createFresnelTwoD), '%s.uvCoord' % (createFresnel))\n cmds.connectAttr('%s.outUvFilterSize' % (createFresnelTwoD), '%s.uvFilterSize' % (createFresnel))\n vfb.extraTex('vrayRE_Fresnel', 'VrayFresnel', explicit_channel='fresnel', enabled=False)\n ## creates fresnel render element\n \n if type.lower() == 'refl_occ':\n if not cmds.objExists('vrayRE_reflectionOcclusion'):\n if cmds.objExists('MIRROR_REFLOCC'):\n vfb.extraTex('vrayRE_reflectionOcclusion', 'MIRROR_REFLOCC', explicit_channel='reflectionOcclusion', enabled=False)\n ## creates contact shadow render element\n \n## example creation ## \n## createShadowRE = CreateRenderElements('contactShadow')\n \nclass PlateProject(object): \n def __init__(self):\n \n getShotCamInfo()\n \n projectCam = getShotCamInfo.shotCam\n if not cmds.objExists('plateProject'):\n projShader = cmds.shadingNode('VRayMtl', asShader=True, name='plateProject') \n projShaderSG = cmds.sets(name = 'plateProject_SG', renderable=True,noSurfaceShader=True,empty=True)\n cmds.connectAttr('%s.outColor' % (projShader) ,'%s.surfaceShader' % (projShaderSG)) \n cmds.setAttr('%s.useFresnel' % (projShader), 0)\n cmds.setAttr('%s.diffuseColorAmount' % (projShader), 0)\n ## creates shader\n \n plateTexture = 
cmds.shadingNode('file', asTexture=True, name='plateTexture')\n cmds.setAttr('%s.defaultColor' % (plateTexture), 0,0,0, type='double3')\n cmds.setAttr('%s.useFrameExtension' % (plateTexture), 1)\n ## creates texture node\n \n fileProject = cmds.shadingNode('projection', asTexture=True, name='projectNodePlate') \n cmds.setAttr('%s.projType' % (fileProject), 8)\n cmds.setAttr('%s.fitType' % (fileProject), 1)\n cmds.setAttr('%s.fitFill' % (fileProject), 0)\n cmds.setAttr('%s.alphaOffset' % (fileProject), 1)\n cmds.setAttr('%s.defaultColor' % (fileProject), 0,0,0, type='double3')\n ## creates projection node\n \n twoD = cmds.shadingNode('place2dTexture', asUtility=True, name='PlatePlace2d')\n cmds.setAttr('%s.wrapU' % (twoD), 0)\n cmds.setAttr('%s.wrapV' % (twoD), 0)\n ## creates place2D for plate texture\n \n threeD = cmds.shadingNode('place3dTexture', asUtility=True, name='PlatePlace3d')\n ## creates place3D for camera\n \n cmds.connectAttr('%s.outColor' % (fileProject), '%s.illumColor' % (projShader))\n cmds.connectAttr('%s.outColor' % (plateTexture), '%s.image' % (fileProject))\n cmds.connectAttr('%s.worldInverseMatrix' % (threeD), '%s.placementMatrix' % (fileProject))\n cmds.connectAttr('%s.outAlpha' % (fileProject), '%s.opacityMapR' % (projShader))\n cmds.connectAttr('%s.outAlpha' % (fileProject), '%s.opacityMapG' % (projShader))\n cmds.connectAttr('%s.outAlpha' % (fileProject), '%s.opacityMapB' % (projShader))\n ## connects texture, alpha, shader, projection, and 3D placement\n \n place2DConnections = ('coverage', 'translateFrame', 'rotateFrame', 'mirrorU', 'mirrorV', 'stagger', 'wrapU', 'wrapV', 'repeatUV',\n 'offset', 'rotateUV', 'noiseUV', 'vertexUvOne', 'vertexUvTwo', 'vertexUvThree', 'vertexCameraOne') \n for x in place2DConnections:\n cmds.connectAttr('%s.%s' % (twoD, x), '%s.%s' % (plateTexture, x)) \n cmds.connectAttr('%s.outUV' % (twoD), '%s.uv' % (plateTexture))\n cmds.connectAttr('%s.outUvFilterSize' % (twoD), '%s.uvFilterSize' % (plateTexture))\n ## connects place2D for plate texture\n\n if cmds.objExists(projectCam):\n fileProject = cmds.ls('projectNodePlate')[0]\n getIfConnect = cmds.listConnections('%s.linkedCamera' % (fileProject), d=False, s=True)\n if getIfConnect == None:\n cmds.connectAttr('%s' % (projectCam) + 'Shape.message', '%s.linkedCamera' % (fileProject), f=True)\n ## connects shotcam to the proj cam if it exists\n\n## example creation ## \n## createPlateProject = PlateProject() \n \nclass CreateRefSphere(object): \n def __init__(self):\n \n if not cmds.objExists('greyBallShader'):\n diffShader = cmds.shadingNode('VRayMtl', asShader=True, name='greyBallShader')\n diffShaderSG = cmds.sets(name = 'greyBallSG', renderable=True,noSurfaceShader=True,empty=True)\n cmds.connectAttr('%s.outColor' % (diffShader) ,'%s.surfaceShader' % (diffShaderSG))\n cmds.setAttr('%s.useFresnel' % (diffShader), 0)\n cmds.setAttr('%s.color' % (diffShader), 0.18,0.18,0.18, type='double3')\n ## creates and assigns grey ball shader \n \n if not cmds.objExists('greyBall'):\n diffBall = cmds.polySphere(name='greyBall', r=2.5)\n cmds.setAttr('%s.translateY' % (diffBall[0]), 6)\n cmds.delete(diffBall, ch=True)\n ## creates grey ball geo\n \n cmds.sets(diffBall[0], e=True, forceElement='greyBallSG')\n ## assigns grey ball shader to geo\n \n if not cmds.objExists('chromeBallShader'):\n refShader = cmds.shadingNode('VRayMtl', asShader=True, name='chromeBallShader')\n refShaderSG = cmds.sets(name = 'chromeBallSG', renderable=True,noSurfaceShader=True,empty=True)\n cmds.connectAttr('%s.outColor' % 
(refShader) ,'%s.surfaceShader' % (refShaderSG))\n cmds.setAttr('%s.useFresnel' % (refShader), 0)\n cmds.setAttr('%s.color' % (refShader), 0, 0, 0, type='double3')\n cmds.setAttr('%s.reflectionColor' % (refShader), 1, 1, 1, type='double3')\n cmds.setAttr('%s.diffuseColorAmount' % (refShader), 0)\n cmds.setAttr('%s.reflectionsMaxDepth' % (refShader), 2)\n ## creates chrome ball shader\n \n if not cmds.objExists('chromeBall'): \n refBall = cmds.polySphere(name='chromeBall', r=2.5)\n cmds.setAttr('%s.translate' % (refBall[0]), 7,6,0)\n cmds.delete(refBall, ch=True) \n ## creates chrome ball geo\n \n cmds.sets(refBall[0], e=True, forceElement='chromeBallSG')\n ## assigns chrome ball shader to geo \n \n colorChartTexturePath = '/jobs/asset_library/sequences/assets/common/pub/hdr_library/ColorChecker_linear_from_Avg_16bit.exr'\n ## color chart texture path\n \n if not cmds.objExists('colorChartShader'):\n chartShader = cmds.shadingNode('VRayLightMtl', asShader=True, name='colorChartShader')\n chartShaderSG = cmds.sets(name = 'chartShaderSG', renderable=True,noSurfaceShader=True,empty=True)\n cmds.connectAttr('%s.outColor' % (chartShader) ,'%s.surfaceShader' % (chartShaderSG))\n cmds.setAttr('%s.emitOnBackSide' % (chartShader), 1)\n ## creates color chart VrayLightMtl\n \n if not cmds.objExists('colorChart'): \n colorChart = cmds.polyPlane(name='colorChart', h=5,w=5,sx=1,sy=1)\n #cmds.setAttr('%s.translate' % (colorChart[0]), 0,6,0)\n cmds.setAttr('%s.rotateX' % (colorChart[0]), 90)\n ## creates color chart geo\n \n cmds.sets(colorChart[0], e=True, forceElement='chartShaderSG')\n ## assigns shader\n \n if not cmds.objExists('chartTexture'):\n chartTexture = cmds.shadingNode('file', asTexture=True, name='chartTexture')\n chartTwoD = cmds.shadingNode('place2dTexture', asUtility=True, name='chartPlace2d') \n chart2DConnections = ('coverage', 'translateFrame', 'rotateFrame', 'mirrorU', 'mirrorV', 'stagger', 'wrapU', 'wrapV', 'repeatUV',\n 'offset', 'rotateUV', 'noiseUV', 'vertexUvOne', 'vertexUvTwo', 'vertexUvThree', 'vertexCameraOne') \n for x in chart2DConnections:\n cmds.connectAttr('%s.%s' % (chartTwoD, x), '%s.%s' % (chartTexture, x)) \n cmds.connectAttr('%s.outUV' % (chartTwoD), '%s.uv' % (chartTexture))\n cmds.connectAttr('%s.outUvFilterSize' % (chartTwoD), '%s.uvFilterSize' % (chartTexture))\n cmds.connectAttr('%s.outColor' % (chartTexture), '%s.color' % (chartShader))\n ## creates and connects file texture node\n \n cmds.setAttr('%s.fileTextureName' % (chartTexture), colorChartTexturePath, type='string')\n ############ So dumb but I can't get the file tetxture path to fully eval without selecting the file node ###################\n cmds.select(chartTexture, r=True)\n ## feeds in colro chart texture path\n \n if not cmds.objExists('RefSphere_GRP'):\n refSetupGroupName = 'RefSphere_GRP'\n refSetupTransGroup = 'TranslateThis'\n refSetupGroupMembers = (colorChart[0], refBall[0], diffBall[0])\n translateGroup = cmds.group(refSetupGroupMembers, name=refSetupTransGroup)\n refSetupGroup = cmds.group(translateGroup, name=refSetupGroupName)\n \n getShotCamInfo()\n \n if getShotCamInfo.shotCam > 0: \n if cmds.objExists(getShotCamInfo.shotCam):\n refSetupGroup = cmds.ls('RefSphere_GRP')[0]\n translateGroup = cmds.ls('TranslateThis')[0] \n getIfConnect = cmds.listConnections('%s.tx' % (refSetupGroup), d=False, s=True)\n if getIfConnect == None:\n cmds.parentConstraint(getShotCamInfo.shotCam, refSetupGroup, mo=False)\n cmds.setAttr('%s.translate' % (translateGroup), 0, 0, -100)\n ## creates groups and 
constrains to camera\n \n## example creation ## \n## createRefSpheres = CreateRefSphere()\n\nclass CreateUV(object):\n def __init__(self): \n if not cmds.objExists('vrayRE_RE_UV'):\n uvSampler = cmds.shadingNode('samplerInfo', asUtility=True, name='UVsampler') \n uvExtraTex = vfb.extraTex('vrayRE_UV', uvSampler, explicit_channel='UV', enabled=False)\n cmds.connectAttr('%s.uCoord' % uvSampler, 'vrayRE_UV.vray_texture_extratexR') \n cmds.connectAttr('%s.vCoord' % uvSampler, 'vrayRE_UV.vray_texture_extratexG') \n \n## example creation ## \n## createRefSpheres = CreateUV() \n \n \n \n\ndef launchUI():\n global vrayToolBoxUtil\n \n # will try and close the ui if it exists\n try: vrayToolBoxUtil.close()\n except: pass\n \n vrayToolBoxUtil = UtilityToolBoxUI()\n vrayToolBoxUtil.show()\n vrayToolBoxUtil.raise_() \n \n################## Show Window ####################################### \n \nif __name__ == \"__main__\": \n launchUI()\n"
] | true |
98,402 |
41a35b4f8d2c65ce5b840e04e53eef318398a601
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-05-05 10:06:22
# @Author : lczean ([email protected])
# @Link : https://github.com/lczean
# @Version : 1.0
# @File : io.py
'''
# File I/O
'''
# write a file
with open("text.txt", "wt") as out_file:
    out_file.write("This text will be written to the file\nJust a test!")
# read a file
with open("text.txt", "rt") as in_file:
TEXT = in_file.read()
print(TEXT)
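# "wt"/"rt" open the file in text mode for writing/reading; for non-ASCII text
# such as the string written above, passing an explicit encoding is safer,
# e.g. open("text.txt", "wt", encoding="utf-8").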
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2017-05-05 10:06:22\n# @Author : lczean ([email protected])\n# @Link : https://github.com/lczean\n# @Version : 1.0\n# @File : io.py\n\n'''\n# 文件IO\n'''\n\n# write a file\nwith open(\"text.txt\", \"wt\") as out_file:\n out_file.write(\"该文本会写入到文件中\\nJust a test!\")\n\n# read a file\nwith open(\"text.txt\", \"rt\") as in_file:\n TEXT = in_file.read()\n\nprint(TEXT)\n",
"<docstring token>\nwith open('text.txt', 'wt') as out_file:\n out_file.write('该文本会写入到文件中\\nJust a test!')\nwith open('text.txt', 'rt') as in_file:\n TEXT = in_file.read()\nprint(TEXT)\n",
"<docstring token>\n<code token>\n"
] | false |
98,403 |
22cc2d3764b7d6fdf0f2622f737863ea16acd618
|
#!/usr/bin/python
# Hello Data Science python program
print "Hello Data Science!";
|
[
"#!/usr/bin/python\n\n# Hello Data Science python program\n\nprint \"Hello Data Science!\";\n"
] | true |
98,404 |
9c628f0b05a38480034b7b58605a7e0f65d641e5
|
num_array = list()
num = input("Enter how many elements you want:")
print('Enter numbers in array: ')
for i in range(int(num)):
n = input("num :")
num_array.append(int(n))
for i in range(int(num)):
    print(num_array[i])
print('Arrays')
|
[
"num_array = list()\nnum = input(\"Enter how many elements you want:\")\nprint ('Enter numbers in array: ')\nfor i in range(int(num)):\n n = input(\"num :\")\n num_array.append(int(n))\nfor i in range(int(num)):\n print (num_array[i])\nprint('Arrays')",
"num_array = list()\nnum = input('Enter how many elements you want:')\nprint('Enter numbers in array: ')\nfor i in range(int(num)):\n n = input('num :')\n num_array.append(int(n))\nfor i in range(int(num)):\n print(num_array[i])\nprint('Arrays')\n",
"<assignment token>\nprint('Enter numbers in array: ')\nfor i in range(int(num)):\n n = input('num :')\n num_array.append(int(n))\nfor i in range(int(num)):\n print(num_array[i])\nprint('Arrays')\n",
"<assignment token>\n<code token>\n"
] | false |
98,405 |
ba2ee9c1510da934c12aeb49dcf9cad12849002f
|
from django.test import TestCase
from django.test import RequestFactory
import pandas as pd
import numpy as np
from p3app.pd_df_cache.models import *
from p3app.pd_df_cache.views import DataFrameCacheDetailView


class TestDFCache(TestCase):
def test_models(self):
print("Testing Models")
df = pd.DataFrame(np.random.rand(4,3),columns=('A','B','C'))
dfc = DataFrameCache.create_cache(df,'testcache')
df2 = dfc.get_dataframe()
        test = [v for i, v in enumerate(df2.columns) if df2.columns[i] == df.columns[i]]
        # clean up the files created by the test
        dfc.delete()
        self.assertEqual(len(test), 3)

    def test_api_view(self):
print("Testing api view")
df = pd.DataFrame(np.random.rand(4,3),columns=('A','B','C'))
dfc = DataFrameCache.create_cache(df,'testcache')
request = RequestFactory().get('/api/dfcache/')
view = DataFrameCacheDetailView.as_view()
response = view(request,pk=dfc.id)
dfc.delete()
self.assertEqual(response.status_code, 200)
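        # RequestFactory builds the request directly, without running middleware,
        # so the view is exercised in isolation; status 200 confirms the detail
        # endpoint returns the cached frame for the given pk.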
|
[
"from django.test import TestCase\nfrom django.test import RequestFactory\nimport pandas as pd\nimport numpy as np\nfrom p3app.pd_df_cache.models import *\nfrom p3app.pd_df_cache.views import DataFrameCacheDetailView\nclass TestDFCache(TestCase):\n def test_models(self):\n print(\"Testing Models\")\n df = pd.DataFrame(np.random.rand(4,3),columns=('A','B','C'))\n dfc = DataFrameCache.create_cache(df,'testcache')\n df2 = dfc.get_dataframe()\n # cleanup the files created by test\n test = [v for i,v in enumerate(df2.columns) if df2.columns[i] == df.columns[i]]\n dfc.delete()\n self.assertEqual(len(test), 3)\n\n def test_api_view(self):\n print(\"Testing api view\")\n df = pd.DataFrame(np.random.rand(4,3),columns=('A','B','C'))\n dfc = DataFrameCache.create_cache(df,'testcache')\n request = RequestFactory().get('/api/dfcache/')\n view = DataFrameCacheDetailView.as_view()\n response = view(request,pk=dfc.id)\n dfc.delete()\n self.assertEqual(response.status_code, 200)\n\n",
"from django.test import TestCase\nfrom django.test import RequestFactory\nimport pandas as pd\nimport numpy as np\nfrom p3app.pd_df_cache.models import *\nfrom p3app.pd_df_cache.views import DataFrameCacheDetailView\n\n\nclass TestDFCache(TestCase):\n\n def test_models(self):\n print('Testing Models')\n df = pd.DataFrame(np.random.rand(4, 3), columns=('A', 'B', 'C'))\n dfc = DataFrameCache.create_cache(df, 'testcache')\n df2 = dfc.get_dataframe()\n test = [v for i, v in enumerate(df2.columns) if df2.columns[i] ==\n df.columns[i]]\n dfc.delete()\n self.assertEqual(len(test), 3)\n\n def test_api_view(self):\n print('Testing api view')\n df = pd.DataFrame(np.random.rand(4, 3), columns=('A', 'B', 'C'))\n dfc = DataFrameCache.create_cache(df, 'testcache')\n request = RequestFactory().get('/api/dfcache/')\n view = DataFrameCacheDetailView.as_view()\n response = view(request, pk=dfc.id)\n dfc.delete()\n self.assertEqual(response.status_code, 200)\n",
"<import token>\n\n\nclass TestDFCache(TestCase):\n\n def test_models(self):\n print('Testing Models')\n df = pd.DataFrame(np.random.rand(4, 3), columns=('A', 'B', 'C'))\n dfc = DataFrameCache.create_cache(df, 'testcache')\n df2 = dfc.get_dataframe()\n test = [v for i, v in enumerate(df2.columns) if df2.columns[i] ==\n df.columns[i]]\n dfc.delete()\n self.assertEqual(len(test), 3)\n\n def test_api_view(self):\n print('Testing api view')\n df = pd.DataFrame(np.random.rand(4, 3), columns=('A', 'B', 'C'))\n dfc = DataFrameCache.create_cache(df, 'testcache')\n request = RequestFactory().get('/api/dfcache/')\n view = DataFrameCacheDetailView.as_view()\n response = view(request, pk=dfc.id)\n dfc.delete()\n self.assertEqual(response.status_code, 200)\n",
"<import token>\n\n\nclass TestDFCache(TestCase):\n\n def test_models(self):\n print('Testing Models')\n df = pd.DataFrame(np.random.rand(4, 3), columns=('A', 'B', 'C'))\n dfc = DataFrameCache.create_cache(df, 'testcache')\n df2 = dfc.get_dataframe()\n test = [v for i, v in enumerate(df2.columns) if df2.columns[i] ==\n df.columns[i]]\n dfc.delete()\n self.assertEqual(len(test), 3)\n <function token>\n",
"<import token>\n\n\nclass TestDFCache(TestCase):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,406 |
da1db7594d171a5a2be13efe9159df134e063d66
|
# -*- coding: utf-8 -*-
"""
Autor:
Andrés lópez joya
Fecha:
Noviembre/2019
Contenido:
Inteligencia de Negocio
Grado en IngenierÃa Informática
Universidad de Granada
"""
import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt
from sklearn.preprocessing import Normalizer
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
import xgboost as xgb
import lightgbm as lgb
from sklearn.preprocessing import scale
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier,GradientBoostingRegressor
def normalizar(valores):
c1 = (valores - valores.min()) * 1.0
c2 = (valores.max() - valores.min())
return c1 / c2
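# Illustrative behaviour of this min-max scaler, assuming a pandas Series:
# normalizar(pd.Series([0.0, 5.0, 10.0])) -> 0.0, 0.5, 1.0
# A constant column makes (max - min) zero and yields NaN values.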
le = preprocessing.LabelEncoder()
'''
data loading
'''
# the .csv files were prepared beforehand, replacing ',,' and "Not known" with NaN (missing values)
data_x = pd.read_csv('nepal_earthquake_tra.csv')
data_y = pd.read_csv('nepal_earthquake_labels.csv')
data_x_tst = pd.read_csv('nepal_earthquake_tst.csv')
# DATA VISUALIZATION
'''
print("Missing values in x:")
print(data_x.isnull().sum())
'''
'''
print("TYPES")
print(data_x.dtypes)
'''
'''
print("Missing values in tst:")
print(data_x_tst.isnull().sum())
'''
'''
print("Class imbalance:")
data_y.damage_grade.value_counts().plot(kind='bar')
plt.xticks(rotation = 0)
plt.show()
'''
print('geo_level_1_id:\n')
print(data_x['geo_level_1_id'].value_counts()[0:6])
print('\ngeo_level_2_id:\n')
print(data_x['geo_level_2_id'].value_counts()[0:6])
print('\ngeo_level_3_id:\n')
print(data_x['geo_level_3_id'].value_counts()[0:6])
print('\ncount_floors_pre_eq:\n')
print(data_x['count_floors_pre_eq'].value_counts()[0:6])
print('\nage:\n')
print(data_x['age'].value_counts()[0:6])
print('\narea_percentage:\n')
print(data_x['area_percentage'].value_counts()[0:6])
print('\nheight_percentage:\n')
print(data_x['height_percentage'].value_counts()[0:6])
print('land_surface_condition:\n')
print(data_x['land_surface_condition'].value_counts()[0:6])
print('\nfoundation_type:\n')
print(data_x['foundation_type'].value_counts()[0:6])
print('\nroof_type:\n')
print(data_x['roof_type'].value_counts()[0:6])
print('\nground_floor_type:\n')
print(data_x['ground_floor_type'].value_counts()[0:6])
print('\nother_floor_type:\n')
print(data_x['other_floor_type'].value_counts()[0:6])
print('\nposition:\n')
print(data_x['position'].value_counts()[0:6])
print('\nplan_configuration:\n')
print(data_x['plan_configuration'].value_counts()[0:6])
print('has_superstructure_adobe_mud:\n')
print(data_x['has_superstructure_adobe_mud'].value_counts()[0:6])
print('\nhas_superstructure_mud_mortar_stone:\n')
print(data_x['has_superstructure_mud_mortar_stone'].value_counts()[0:6])
print('\nhas_superstructure_stone_flag:\n')
print(data_x['has_superstructure_stone_flag'].value_counts()[0:6])
print('\nhas_superstructure_cement_mortar_stone:\n')
print(data_x['has_superstructure_cement_mortar_stone'].value_counts()[0:6])
print('\nhas_superstructure_mud_mortar_brick:\n')
print(data_x['has_superstructure_mud_mortar_brick'].value_counts()[0:6])
print('\nhas_superstructure_cement_mortar_brick:\n')
print(data_x['has_superstructure_cement_mortar_brick'].value_counts()[0:6])
print('\nhas_superstructure_timber:\n')
print(data_x['has_superstructure_timber'].value_counts()[0:6])
print('has_superstructure_bamboo:\n')
print(data_x['has_superstructure_bamboo'].value_counts()[0:6])
print('\nhas_superstructure_rc_non_engineered:\n')
print(data_x['has_superstructure_rc_non_engineered'].value_counts()[0:6])
print('\nhas_superstructure_rc_engineered:\n')
print(data_x['has_superstructure_rc_engineered'].value_counts()[0:6])
print('\nhas_superstructure_other:\n')
print(data_x['has_superstructure_other'].value_counts()[0:6])
print('\nlegal_ownership_status:\n')
print(data_x['legal_ownership_status'].value_counts()[0:6])
print('\ncount_families:\n')
print(data_x['count_families'].value_counts()[0:6])
print('\nhas_secondary_use:\n')
print(data_x['has_secondary_use'].value_counts()[0:6])
print('has_secondary_use_agriculture:\n')
print(data_x['has_secondary_use_agriculture'].value_counts()[0:6])
print('\nhas_secondary_use_hotel:\n')
print(data_x['has_secondary_use_hotel'].value_counts()[0:6])
print('\nhas_secondary_use_rental:\n')
print(data_x['has_secondary_use_rental'].value_counts()[0:6])
print('\nhas_secondary_use_institution:\n')
print(data_x['has_secondary_use_institution'].value_counts()[0:6])
print('\nhas_secondary_use_school:\n')
print(data_x['has_secondary_use_school'].value_counts()[0:6])
print('\nhas_secondary_use_industry:\n')
print(data_x['has_secondary_use_industry'].value_counts()[0:6])
print('\nhas_secondary_use_health_post:\n')
print(data_x['has_secondary_use_health_post'].value_counts()[0:6])
print('has_secondary_use_gov_office:\n')
print(data_x['has_secondary_use_gov_office'].value_counts()[0:6])
print('\nhas_secondary_use_use_police:\n')
print(data_x['has_secondary_use_use_police'].value_counts()[0:6])
print('\nhas_secondary_use_other:\n')
print(data_x['has_secondary_use_other'].value_counts()[0:6])
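# The column-by-column dumps above could be written more compactly, e.g.:
# for col in data_x.columns:
#     print('\n%s:\n' % col)
#     print(data_x[col].value_counts()[0:6])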
# PREPROCESSING
# drop the columns that are not very discriminative
print(" Dropping columns...")
columns_to_drop = ['building_id', 'has_secondary_use_use_police', 'has_secondary_use_gov_office',
'has_secondary_use_health_post','has_secondary_use_school','has_secondary_use_institution',
'has_secondary_use_industry']
data_x.drop(labels=columns_to_drop, axis=1, inplace = True)
data_x_tst.drop(labels=columns_to_drop, axis=1,inplace = True)
data_y.drop(labels=['building_id'], axis=1,inplace = True)
'''
Categorical variables are converted to numeric (ordinal) variables
'''
from sklearn.preprocessing import LabelEncoder,OrdinalEncoder
from sklearn import preprocessing
mask = data_x.isnull()  # mask used later to restore the NaN values
data_x_tmp = data_x.fillna(9999)  # LabelEncoder cannot handle NaN, so an unused placeholder is assigned
data_x_tmp = data_x_tmp.astype(str).apply(LabelEncoder().fit_transform)  # encode categorical columns as numbers
data_x_nan = data_x_tmp.where(~mask, data_x)  # restore the NaN values
mask = data_x_tst.isnull()  # mask used later to restore the NaN values
data_x_tmp = data_x_tst.fillna(9999)  # LabelEncoder cannot handle NaN, so an unused placeholder is assigned
data_x_tst_tmp = data_x_tmp.astype(str).apply(LabelEncoder().fit_transform)  # encode categorical columns as numbers
data_x_tst_nan = data_x_tst_tmp.where(~mask, data_x_tst)  # restore the NaN values
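# Note: where(~mask, original) keeps the encoded value where the original cell
# was present and copies the original (NaN) back where it was missing, so the
# label encoding never invents values for missing cells.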
#------------------------------------------------------------------
data_x_norma = data_x_nan.apply(normalizar)
data_x_tst_norma = data_x_tst_nan.apply(normalizar)
'''
data_x_norma,data_x_tst_norma = aplicarnormalizar(data_x_nan,data_x_tst_nan)
'''
X = data_x_norma.values
X_tst = data_x_tst_norma.values
y = np.ravel(data_y.values)
#------------------------------------------------------------------------
'''
Cross-validation with stratified partitioning; randomness is controlled by fixing the seed
'''
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=123456)
le = preprocessing.LabelEncoder()
from sklearn.metrics import f1_score
def validacion_cruzada(modelo, X, y, cv):
y_test_all = []
for train, test in cv.split(X, y):
X_train = X[train]
y_train, y_test = y[train], y[test]
t = time.time()
modelo = modelo.fit(X_train,y_train)
tiempo = time.time() - t
y_pred = modelo.predict(X[test])
print("F1 score (tst): {:.4f}, tiempo: {:6.2f} segundos".format(f1_score(y_test,y_pred,average='micro') , tiempo))
y_test_all = np.concatenate([y_test_all,y[test]])
print("")
return modelo, y_test_all
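# Note: with average='micro' on a single-label multiclass problem, the F1
# score printed above is numerically identical to plain accuracy (e.g. 3
# correct predictions out of 4 give 0.75 for both metrics).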
#------------------------------------------------------------------------
'''
print("------ XGB...")
clf = xgb.XGBClassifier(n_estimators = 200)
clf, y_test_clf = validacion_cruzada(clf,X,y,skf)
'''
'''----------------SUBMISSION-1-------------------'''
print("-----RANDOM FOREST-----")
lgbm =RandomForestClassifier(n_estimators=320,
criterion="gini",
max_depth=None,
min_samples_split=8,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=True,
n_jobs=-1,
random_state=1,
verbose=0,
warm_start=False,
class_weight=None,
ccp_alpha=0.0,
max_samples=None)
lgbm, y_test_lgbm = validacion_cruzada(lgbm,X,y,skf)
clf = lgbm
clf = clf.fit(X,y)
y_pred_tra = clf.predict(X)
print("F1 score (tra): {:.4f}".format(f1_score(y,y_pred_tra,average='micro')))
y_pred_tst = clf.predict(X_tst)
df_submission = pd.read_csv('nepal_earthquake_submission_format.csv')
df_submission['damage_grade'] = y_pred_tst
df_submission.to_csv("submission2.csv", index=False)
'''----------------SUBMISSION-2-------------------
print("-----KNN-----")
knn = KNeighborsClassifier(n_neighbors=3)
knn, y_test_lgbm = validacion_cruzada(knn,X,y,skf)
clf = knn
clf = clf.fit(X,y)
y_pred_tra = clf.predict(X)
print("F1 score (tra): {:.4f}".format(f1_score(y,y_pred_tra,average='micro')))
y_pred_tst = clf.predict(X_tst)
df_submission = pd.read_csv('nepal_earthquake_submission_format.csv')
df_submission['damage_grade'] = y_pred_tst
df_submission.to_csv("submission1.csv", index=False)
'''
'''----------------SUBMISSION-3-------------------
print("-----XGB-----")
lgbm =xgb.XGBClassifier(n_estimators = 200)
lgbm, y_test_lgbm = validacion_cruzada(lgbm,X,y,skf)
clf = lgbm
clf = clf.fit(X,y)
y_pred_tra = clf.predict(X)
print("F1 score (tra): {:.4f}".format(f1_score(y,y_pred_tra,average='micro')))
y_pred_tst = clf.predict(X_tst)
df_submission = pd.read_csv('nepal_earthquake_submission_format.csv')
df_submission['damage_grade'] = y_pred_tst
df_submission.to_csv("submission3.csv", index=False)
'''
'''----------------SUBMISSION-4-------------------
print("-----GRADIENT BOOSTING-----")
lgbm =GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, criterion='friedman_mse', min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, min_impurity_decrease=0.,
min_impurity_split=None, init=None,
random_state=None, max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='deprecated', validation_fraction=0.1,
n_iter_no_change=None, tol=1e-4, ccp_alpha=0.0)
lgbm, y_test_lgbm = validacion_cruzada(lgbm,X,y,skf)
clf = lgbm
clf = clf.fit(X,y)
y_pred_tra = clf.predict(X)
print("F1 score (tra): {:.4f}".format(f1_score(y,y_pred_tra,average='micro')))
y_pred_tst = clf.predict(X_tst)
df_submission = pd.read_csv('nepal_earthquake_submission_format.csv')
df_submission['damage_grade'] = y_pred_tst
df_submission.to_csv("submission4.csv", index=False)
'''
'''----------------SUBMISSION-5-------------------
print("-----EXTRA TREE-----")
lgbm =ExtraTreesClassifier(n_estimators=320,
criterion="gini",
max_depth=None,
min_samples_split=8,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=True,
n_jobs=-1,
random_state=1,
verbose=0,
warm_start=False,
class_weight=None,
ccp_alpha=0.0,
max_samples=None
)
lgbm, y_test_lgbm = validacion_cruzada(lgbm,X,y,skf)
clf = lgbm
clf = clf.fit(X,y)
y_pred_tra = clf.predict(X)
print("F1 score (tra): {:.4f}".format(f1_score(y,y_pred_tra,average='micro')))
y_pred_tst = clf.predict(X_tst)
df_submission = pd.read_csv('nepal_earthquake_submission_format.csv')
df_submission['damage_grade'] = y_pred_tst
df_submission.to_csv("submission5.csv", index=False)
'''
'''
print("-----SVM-----")
lgbm =svm.SVC(decision_function_shape='ovo')
lgbm, y_test_lgbm = validacion_cruzada(lgbm,X,y,skf)
clf = lgbm
clf = clf.fit(X,y)
y_pred_tra = clf.predict(X)
print("F1 score (tra): {:.4f}".format(f1_score(y,y_pred_tra,average='micro')))
y_pred_tst = clf.predict(X_tst)
df_submission = pd.read_csv('nepal_earthquake_submission_format.csv')
df_submission['damage_grade'] = y_pred_tst
df_submission.to_csv("submission5.csv", index=False)
'''
|
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nAutor:\r\n Andrés lópez joya\r\nFecha:\r\n Noviembre/2019\r\nContenido:\r\n \r\n Inteligencia de Negocio\r\n Grado en IngenierÃa Informática\r\n Universidad de Granada\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import Normalizer\r\nfrom sklearn.model_selection import StratifiedKFold\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn import preprocessing\r\nimport xgboost as xgb\r\nimport lightgbm as lgb\r\nfrom sklearn.preprocessing import scale\r\nfrom sklearn import svm\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\nfrom sklearn.ensemble import GradientBoostingClassifier,GradientBoostingRegressor\r\n\r\ndef normalizar(valores):\r\n c1 = (valores - valores.min()) * 1.0\r\n c2 = (valores.max() - valores.min())\r\n return c1 / c2\r\n\r\nle = preprocessing.LabelEncoder()\r\n#\r\n\r\n'''\r\nlectura de datos\r\n'''\r\n#los ficheros .csv se han preparado previamente para sustituir ,, y \"Not known\" por NaN (valores perdidos)\r\ndata_x = pd.read_csv('nepal_earthquake_tra.csv')\r\ndata_y = pd.read_csv('nepal_earthquake_labels.csv')\r\ndata_x_tst = pd.read_csv('nepal_earthquake_tst.csv')\r\n\r\n#VISUALIZACIÓN DE LOS DATOS\r\n'''\r\nprint(\"Valores perdidos en x:\")\r\n\r\nprint(data_x.isnull().sum())\r\n'''\r\n'''\r\nprint(\"TIPOS\")\r\n\r\nprint(data_x.dtypes)\r\n'''\r\n'''\r\nprint(\"Valores perdidos en tst:\")\r\n\r\nprint(data_x_tst.isnull().sum())\r\n'''\r\n\r\n'''\r\nprint(\"Desequilibrio de valores:\")\r\ndata_y.damage_grade.value_counts().plot(kind='bar')\r\nplt.xticks(rotation = 
0)\r\nplt.show()\r\n'''\r\nprint('geo_level_1_id:\\n')\r\nprint(data_x['geo_level_1_id'].value_counts()[0:6])\r\nprint('\\ngeo_level_2_id:\\n')\r\nprint(data_x['geo_level_2_id'].value_counts()[0:6])\r\nprint('\\ngeo_level_3_id:\\n')\r\nprint(data_x['geo_level_3_id'].value_counts()[0:6])\r\nprint('\\ncount_floors_pre_eq:\\n')\r\nprint(data_x['count_floors_pre_eq'].value_counts()[0:6])\r\nprint('\\nage:\\n')\r\nprint(data_x['age'].value_counts()[0:6])\r\nprint('\\narea_percentage:\\n')\r\nprint(data_x['area_percentage'].value_counts()[0:6])\r\nprint('\\nheight_percentage:\\n')\r\nprint(data_x['height_percentage'].value_counts()[0:6])\r\n\r\nprint('land_surface_condition:\\n')\r\nprint(data_x['land_surface_condition'].value_counts()[0:6])\r\nprint('\\nfoundation_type:\\n')\r\nprint(data_x['foundation_type'].value_counts()[0:6])\r\nprint('\\nroof_type:\\n')\r\nprint(data_x['roof_type'].value_counts()[0:6])\r\nprint('\\nground_floor_type:\\n')\r\nprint(data_x['ground_floor_type'].value_counts()[0:6])\r\nprint('\\nother_floor_type:\\n')\r\nprint(data_x['other_floor_type'].value_counts()[0:6])\r\nprint('\\nposition:\\n')\r\nprint(data_x['position'].value_counts()[0:6])\r\nprint('\\nplan_configuration:\\n')\r\nprint(data_x['plan_configuration'].value_counts()[0:6])\r\n\r\nprint('has_superstructure_adobe_mud:\\n')\r\nprint(data_x['has_superstructure_adobe_mud'].value_counts()[0:6])\r\nprint('\\nhas_superstructure_mud_mortar_stone:\\n')\r\nprint(data_x['has_superstructure_mud_mortar_stone'].value_counts()[0:6])\r\nprint('\\nhas_superstructure_stone_flag:\\n')\r\nprint(data_x['has_superstructure_stone_flag'].value_counts()[0:6])\r\nprint('\\nhas_superstructure_cement_mortar_stone:\\n')\r\nprint(data_x['has_superstructure_cement_mortar_stone'].value_counts()[0:6])\r\nprint('\\nhas_superstructure_mud_mortar_brick:\\n')\r\nprint(data_x['has_superstructure_mud_mortar_brick'].value_counts()[0:6])\r\nprint('\\nhas_superstructure_cement_mortar_brick:\\n')\r\nprint(data_x['has_superstructure_cement_mortar_brick'].value_counts()[0:6])\r\nprint('\\nhas_superstructure_timber:\\n')\r\nprint(data_x['has_superstructure_timber'].value_counts()[0:6])\r\n\r\nprint('has_superstructure_bamboo:\\n')\r\nprint(data_x['has_superstructure_bamboo'].value_counts()[0:6])\r\nprint('\\nhas_superstructure_rc_non_engineered:\\n')\r\nprint(data_x['has_superstructure_rc_non_engineered'].value_counts()[0:6])\r\nprint('\\nhas_superstructure_rc_engineered:\\n')\r\nprint(data_x['has_superstructure_rc_engineered'].value_counts()[0:6])\r\nprint('\\nhas_superstructure_other:\\n')\r\nprint(data_x['has_superstructure_other'].value_counts()[0:6])\r\nprint('\\nlegal_ownership_status:\\n')\r\nprint(data_x['legal_ownership_status'].value_counts()[0:6])\r\nprint('\\ncount_families:\\n')\r\nprint(data_x['count_families'].value_counts()[0:6])\r\nprint('\\nhas_secondary_use:\\n')\r\nprint(data_x['has_secondary_use'].value_counts()[0:6])\r\n\r\nprint('has_secondary_use_agriculture:\\n')\r\nprint(data_x['has_secondary_use_agriculture'].value_counts()[0:6])\r\nprint('\\nhas_secondary_use_hotel:\\n')\r\nprint(data_x['has_secondary_use_hotel'].value_counts()[0:6])\r\nprint('\\nhas_secondary_use_rental:\\n')\r\nprint(data_x['has_secondary_use_rental'].value_counts()[0:6])\r\nprint('\\nhas_secondary_use_institution:\\n')\r\nprint(data_x['has_secondary_use_institution'].value_counts()[0:6])\r\nprint('\\nhas_secondary_use_school:\\n')\r\nprint(data_x['has_secondary_use_school'].value_counts()[0:6])\r\nprint('\\nhas_secondary_use_industry:\\n')\r\nprint(data_x
['has_secondary_use_industry'].value_counts()[0:6])\r\nprint('\\nhas_secondary_use_health_post:\\n')\r\nprint(data_x['has_secondary_use_health_post'].value_counts()[0:6])\r\n\r\nprint('has_secondary_use_gov_office:\\n')\r\nprint(data_x['has_secondary_use_gov_office'].value_counts()[0:6])\r\nprint('\\nhas_secondary_use_use_police:\\n')\r\nprint(data_x['has_secondary_use_use_police'].value_counts()[0:6])\r\nprint('\\nhas_secondary_use_other:\\n')\r\nprint(data_x['has_secondary_use_other'].value_counts()[0:6])\r\n\r\n#PREPROCESADO\r\n#se quitan las columnas que no son muy clasificatorias\r\nprint(\" Borrando columnas...\")\r\ncolumns_to_drop = ['building_id', 'has_secondary_use_use_police', 'has_secondary_use_gov_office',\r\n 'has_secondary_use_health_post','has_secondary_use_school','has_secondary_use_institution',\r\n 'has_secondary_use_industry']\r\ndata_x.drop(labels=columns_to_drop, axis=1, inplace = True)\r\n\r\ndata_x_tst.drop(labels=columns_to_drop, axis=1,inplace = True)\r\ndata_y.drop(labels=['building_id'], axis=1,inplace = True)\r\n\r\n\r\n'''\r\nSe convierten las variables categóricas a variables numéricas (ordinales)\r\n'''\r\nfrom sklearn.preprocessing import LabelEncoder,OrdinalEncoder\r\nfrom sklearn import preprocessing\r\nmask = data_x.isnull()\r\n\r\ndata_x_tmp = data_x.fillna(9999)\r\n\r\ndata_x_tmp = data_x.astype(str).apply(LabelEncoder().fit_transform)\r\n\r\ndata_x_nan = data_x_tmp.where(~mask, data_x)\r\n\r\nmask = data_x_tst.isnull() #máscara para luego recuperar los NaN\r\ndata_x_tmp = data_x_tst.fillna(9999) #LabelEncoder no funciona con NaN, se asigna un valor no usado\r\n\r\ndata_x_tst_tmp = data_x_tmp.astype(str).apply(LabelEncoder().fit_transform) #se convierten categóricas en numéricas\r\n\r\ndata_x_tst_nan = data_x_tst_tmp.where(~mask, data_x_tst) #se recuperan los NaN\r\n\r\n\r\n\r\n#------------------------------------------------------------------\r\ndata_x_norma = data_x_nan.apply(normalizar)\r\ndata_x_tst_norma = data_x_tst_nan.apply(normalizar)\r\n\r\n'''\r\ndata_x_norma,data_x_tst_norma = aplicarnormalizar(data_x_nan,data_x_tst_nan)\r\n'''\r\nX = data_x_norma.values\r\nX_tst = data_x_tst_norma.values\r\ny = np.ravel(data_y.values)\r\n\r\n\r\n#------------------------------------------------------------------------\r\n'''\r\nValidación cruzada con particionado estratificado y control de la aleatoriedad fijando la semilla\r\n'''\r\n\r\nskf = StratifiedKFold(n_splits=5, shuffle=True, random_state=123456)\r\nle = preprocessing.LabelEncoder()\r\n\r\nfrom sklearn.metrics import f1_score\r\n\r\ndef validacion_cruzada(modelo, X, y, cv):\r\n y_test_all = []\r\n\r\n for train, test in cv.split(X, y):\r\n X_train = X[train]\r\n y_train, y_test = y[train], y[test]\r\n \r\n \r\n \r\n t = time.time()\r\n \r\n modelo = modelo.fit(X_train,y_train)\r\n tiempo = time.time() - t\r\n y_pred = modelo.predict(X[test])\r\n print(\"F1 score (tst): {:.4f}, tiempo: {:6.2f} segundos\".format(f1_score(y_test,y_pred,average='micro') , tiempo))\r\n y_test_all = np.concatenate([y_test_all,y[test]])\r\n\r\n print(\"\")\r\n\r\n return modelo, y_test_all\r\n#------------------------------------------------------------------------\r\n\r\n'''\r\nprint(\"------ XGB...\")\r\nclf = xgb.XGBClassifier(n_estimators = 200)\r\n#clf, y_test_clf = validacion_cruzada(clf,X,y,skf)\r\n#'''\r\n\r\n#'''\r\n'''----------------SUBMISSION-1-------------------'''\r\nprint(\"-----RANDOM FOREST-----\")\r\nlgbm =RandomForestClassifier(n_estimators=320,\r\n criterion=\"gini\",\r\n max_depth=None,\r\n 
min_samples_split=8,\r\n min_samples_leaf=1,\r\n min_weight_fraction_leaf=0.,\r\n max_features=\"auto\",\r\n max_leaf_nodes=None,\r\n min_impurity_decrease=0.,\r\n min_impurity_split=None,\r\n bootstrap=True,\r\n oob_score=True,\r\n n_jobs=-1,\r\n random_state=1,\r\n verbose=0,\r\n warm_start=False,\r\n class_weight=None,\r\n ccp_alpha=0.0,\r\n max_samples=None)\r\n\r\nlgbm, y_test_lgbm = validacion_cruzada(lgbm,X,y,skf)\r\nclf = lgbm\r\nclf = clf.fit(X,y)\r\ny_pred_tra = clf.predict(X)\r\nprint(\"F1 score (tra): {:.4f}\".format(f1_score(y,y_pred_tra,average='micro')))\r\ny_pred_tst = clf.predict(X_tst)\r\n\r\ndf_submission = pd.read_csv('nepal_earthquake_submission_format.csv')\r\ndf_submission['damage_grade'] = y_pred_tst\r\ndf_submission.to_csv(\"submission2.csv\", index=False)\r\n\r\n'''----------------SUBMISSION-2-------------------\r\nprint(\"-----KNN-----\")\r\nknn = KNeighborsClassifier(n_neighbors=3)\r\n\r\nknn, y_test_lgbm = validacion_cruzada(knn,X,y,skf)\r\nclf = knn\r\nclf = clf.fit(X,y)\r\ny_pred_tra = clf.predict(X)\r\nprint(\"F1 score (tra): {:.4f}\".format(f1_score(y,y_pred_tra,average='micro')))\r\ny_pred_tst = clf.predict(X_tst)\r\n\r\ndf_submission = pd.read_csv('nepal_earthquake_submission_format.csv')\r\ndf_submission['damage_grade'] = y_pred_tst\r\ndf_submission.to_csv(\"submission1.csv\", index=False)\r\n'''\r\n'''----------------SUBMISSION-3-------------------\r\nprint(\"-----XGB-----\")\r\nlgbm =xgb.XGBClassifier(n_estimators = 200)\r\n\r\nlgbm, y_test_lgbm = validacion_cruzada(lgbm,X,y,skf)\r\nclf = lgbm\r\nclf = clf.fit(X,y)\r\ny_pred_tra = clf.predict(X)\r\nprint(\"F1 score (tra): {:.4f}\".format(f1_score(y,y_pred_tra,average='micro')))\r\ny_pred_tst = clf.predict(X_tst)\r\n\r\ndf_submission = pd.read_csv('nepal_earthquake_submission_format.csv')\r\ndf_submission['damage_grade'] = y_pred_tst\r\ndf_submission.to_csv(\"submission3.csv\", index=False)\r\n'''\r\n'''----------------SUBMISSION-4-------------------\r\nprint(\"-----GRADIENT BOOSTING-----\")\r\nlgbm =GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=100,\r\n subsample=1.0, criterion='friedman_mse', min_samples_split=2,\r\n min_samples_leaf=1, min_weight_fraction_leaf=0.,\r\n max_depth=3, min_impurity_decrease=0.,\r\n min_impurity_split=None, init=None,\r\n random_state=None, max_features=None, verbose=0,\r\n max_leaf_nodes=None, warm_start=False,\r\n presort='deprecated', validation_fraction=0.1,\r\n n_iter_no_change=None, tol=1e-4, ccp_alpha=0.0)\r\n\r\nlgbm, y_test_lgbm = validacion_cruzada(lgbm,X,y,skf)\r\nclf = lgbm\r\nclf = clf.fit(X,y)\r\ny_pred_tra = clf.predict(X)\r\nprint(\"F1 score (tra): {:.4f}\".format(f1_score(y,y_pred_tra,average='micro')))\r\ny_pred_tst = clf.predict(X_tst)\r\n\r\ndf_submission = pd.read_csv('nepal_earthquake_submission_format.csv')\r\ndf_submission['damage_grade'] = y_pred_tst\r\ndf_submission.to_csv(\"submission4.csv\", index=False)\r\n'''\r\n'''----------------SUBMISSION-5-------------------\r\nprint(\"-----EXTRA TREE-----\")\r\nlgbm =ExtraTreesClassifier(n_estimators=320,\r\n criterion=\"gini\",\r\n max_depth=None,\r\n min_samples_split=8,\r\n min_samples_leaf=1,\r\n min_weight_fraction_leaf=0.,\r\n max_features=\"auto\",\r\n max_leaf_nodes=None,\r\n min_impurity_decrease=0.,\r\n min_impurity_split=None,\r\n bootstrap=True,\r\n oob_score=True,\r\n n_jobs=-1,\r\n random_state=1,\r\n verbose=0,\r\n warm_start=False,\r\n class_weight=None,\r\n ccp_alpha=0.0,\r\n max_samples=None\r\n )\r\n\r\nlgbm, y_test_lgbm = 
validacion_cruzada(lgbm,X,y,skf)\r\nclf = lgbm\r\nclf = clf.fit(X,y)\r\ny_pred_tra = clf.predict(X)\r\nprint(\"F1 score (tra): {:.4f}\".format(f1_score(y,y_pred_tra,average='micro')))\r\ny_pred_tst = clf.predict(X_tst)\r\n\r\ndf_submission = pd.read_csv('nepal_earthquake_submission_format.csv')\r\ndf_submission['damage_grade'] = y_pred_tst\r\ndf_submission.to_csv(\"submission5.csv\", index=False)\r\n'''\r\n'''\r\nprint(\"-----SVM-----\")\r\nlgbm =svm.SVC(decision_function_shape='ovo')\r\n\r\nlgbm, y_test_lgbm = validacion_cruzada(lgbm,X,y,skf)\r\nclf = lgbm\r\nclf = clf.fit(X,y)\r\ny_pred_tra = clf.predict(X)\r\nprint(\"F1 score (tra): {:.4f}\".format(f1_score(y,y_pred_tra,average='micro')))\r\ny_pred_tst = clf.predict(X_tst)\r\n\r\ndf_submission = pd.read_csv('nepal_earthquake_submission_format.csv')\r\ndf_submission['damage_grade'] = y_pred_tst\r\ndf_submission.to_csv(\"submission5.csv\", index=False)\r\n'''"
] | true |
98,407 |
38819afa5101c0533b59eaf2935685d78c24dabb
|
# Print a greeting
print("Xin chào...!")
# Sum of the squares of two integers read from the keyboard
print("nhập số thứ nhất:", end="")
a = input()
print("nhập số thứ hai:", end="")
b = input()
a = int(a)
b = int(b)
tong = pow(a, 2) + pow(b, 2)
print(tong)
# Salary: pay = ((base salary + allowance) / 22) * days worked
print("tiền lương cơ bản: ", end="")
a = int(input())
print("Phụ cấp: ", end="")
b = int(input())
print("số ngày đi làm: ", end="")
c = int(input())
tong = ((a + b) / 22) * c
print(int(tong))
# Circumference and area of a circle
PI = 3.14
print("nhap chieu dai ban kinh r:", end="")
a = int(input())
chuvi = a * 2 * PI
print("ket qua chu vi=" + str(chuvi))
dientich = a * a * PI
print("ket qua dien tich=" + str(dientich))
# Print the tens digit and the units digit of a two-digit number
print("nhap so co hai chu so:", end="")
a = int(input())
if 10 <= a <= 99:
    chuc = a // 10
    donvi = a % 10
    print("hang chuc la: " + str(chuc))
    print("hang don vi la: " + str(donvi))
# Read user info and check each value's type
print("Nhap Ten:", end="")
ten = input()
print(type(ten))
print("Nhap Ma So Sinh Vien: ", end="")
mssv = int(input())
print(type(mssv))
print("Nhap Tuoi: ", end="")
tuoi = int(input())
print(type(tuoi))
# First character of a string (index 0, not 1)
print("Nhap ten:", end="")
ten = input()
print("ky tu thu nhat trong chuoi la: " + ten[0])
# Reverse the digits of a number
print("nhap so can nghich dao: ", end="")
so = int(input())
print(str(so)[::-1])
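# Aside (a sketch, not part of the original exercises): divmod() does the
# tens/units split above in a single call.
chuc, donvi = divmod(42, 10)
print(chuc, donvi)  # 4 2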
|
[
"# viết chương trình in ra chữ xin chàochào\r\nprint(\"Xin chào...!\")\r\n\r\n# tính tổng bình phương 2 số nguyên nhập từ bàn phím\r\nprint(\"nhập số thứ nhất:\",end=\"\")\r\na = input()\r\nprint(\"nhập số thứ haihai:\",end=\"\")\r\nb = input()\r\na= int(a)\r\nb= int(b)\r\ntong = pow(a,2) + pow(b,2)\r\nprint(int(tong))\r\n\r\n# tiền lương (lương chính=((tiền lương cơ bản + phụ cấp)/22)) * số ngày đi làm))\r\nprint(\"tiền lương cơ bản: \",end=\"\")\r\na = int(input())\r\nprint(\"Phụ cấp: \",end=\"\")\r\nb = int(input())\r\nprint(\"số ngày đi làm: \",end=\"\")\r\nc = int(input())\r\ntong = ((a+b)/22)*c\r\nprint(int(tong))\r\n\r\n# tính chu vi và dien tich hinh tron\r\nPI=3.14\r\nprint(\"nhap chieu dai ban kinh r:\",end=\"\")\r\na=int(input())\r\nchuvi = a*2*PI\r\nprint(\"ket qua chu vi=\"+str (a*2*PI))\r\ndientich = a*a*PI\r\nprint(\"ket qua dien tich+\"+str (a*a*PI))\r\n\r\n# in ra số hàng chục và hàng đơn vịvị\r\nprint(\"nhap so co hai chu so:\",end=\"\")\r\na=int(input())\r\nif a > 10:\r\n chuc = a // 10\r\n donvi = a % 10\r\nprint(\"hang chuc la: \"+str (chuc))\r\nprint(\"hang don vi la: \"+str(donvi))\r\n\r\n# nhap thong tin va kiem tra kieu du lieu\r\nprint(\"Nhap Ten:\",end=\"\")\r\nten=input()\r\nprint(type(ten))\r\nprint(\"Nhap Ma So Sinh Vien: \",end=\"\")\r\nmssv=int (input())\r\nprint(type(mssv))\r\nprint(\"Nhap Tuoi: \",end=\"\")\r\ntuoi=int (input())\r\nprint(type(tuoi))\r\n\r\n# ki tu thu nhat trong chuoichuoi\r\nprint(\"Nhap ten:\",end=\"\")\r\nten=input()\r\nlen(ten)\r\nprint(\"ky tu thu nhat trong chuoi la: \"+format(ten[1]))\r\n\r\n# ngich dao 2 soso\r\nprint(\"nhap so can nghich dao: \",end=\"\")\r\nso=int(input())\r\nprint(str(so)[::-1])",
"print('Xin chào...!')\nprint('nhập số thứ nhất:', end='')\na = input()\nprint('nhập số thứ haihai:', end='')\nb = input()\na = int(a)\nb = int(b)\ntong = pow(a, 2) + pow(b, 2)\nprint(int(tong))\nprint('tiền lương cơ bản: ', end='')\na = int(input())\nprint('Phụ cấp: ', end='')\nb = int(input())\nprint('số ngày đi làm: ', end='')\nc = int(input())\ntong = (a + b) / 22 * c\nprint(int(tong))\nPI = 3.14\nprint('nhap chieu dai ban kinh r:', end='')\na = int(input())\nchuvi = a * 2 * PI\nprint('ket qua chu vi=' + str(a * 2 * PI))\ndientich = a * a * PI\nprint('ket qua dien tich+' + str(a * a * PI))\nprint('nhap so co hai chu so:', end='')\na = int(input())\nif a > 10:\n chuc = a // 10\n donvi = a % 10\nprint('hang chuc la: ' + str(chuc))\nprint('hang don vi la: ' + str(donvi))\nprint('Nhap Ten:', end='')\nten = input()\nprint(type(ten))\nprint('Nhap Ma So Sinh Vien: ', end='')\nmssv = int(input())\nprint(type(mssv))\nprint('Nhap Tuoi: ', end='')\ntuoi = int(input())\nprint(type(tuoi))\nprint('Nhap ten:', end='')\nten = input()\nlen(ten)\nprint('ky tu thu nhat trong chuoi la: ' + format(ten[1]))\nprint('nhap so can nghich dao: ', end='')\nso = int(input())\nprint(str(so)[::-1])\n",
"print('Xin chào...!')\nprint('nhập số thứ nhất:', end='')\n<assignment token>\nprint('nhập số thứ haihai:', end='')\n<assignment token>\nprint(int(tong))\nprint('tiền lương cơ bản: ', end='')\n<assignment token>\nprint('Phụ cấp: ', end='')\n<assignment token>\nprint('số ngày đi làm: ', end='')\n<assignment token>\nprint(int(tong))\n<assignment token>\nprint('nhap chieu dai ban kinh r:', end='')\n<assignment token>\nprint('ket qua chu vi=' + str(a * 2 * PI))\n<assignment token>\nprint('ket qua dien tich+' + str(a * a * PI))\nprint('nhap so co hai chu so:', end='')\n<assignment token>\nif a > 10:\n chuc = a // 10\n donvi = a % 10\nprint('hang chuc la: ' + str(chuc))\nprint('hang don vi la: ' + str(donvi))\nprint('Nhap Ten:', end='')\n<assignment token>\nprint(type(ten))\nprint('Nhap Ma So Sinh Vien: ', end='')\n<assignment token>\nprint(type(mssv))\nprint('Nhap Tuoi: ', end='')\n<assignment token>\nprint(type(tuoi))\nprint('Nhap ten:', end='')\n<assignment token>\nlen(ten)\nprint('ky tu thu nhat trong chuoi la: ' + format(ten[1]))\nprint('nhap so can nghich dao: ', end='')\n<assignment token>\nprint(str(so)[::-1])\n",
"<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,408 |
383e48bac402adaf78c7c603dfd609f02c476180
|
from setuptools import setup
setup(
    name='Clusperin',
    version='0.1',
    author='Franca AF (Alexander da Franca Fernandes)',
    author_email='[email protected]',
    license='BSD',
    description='Tool to cluster proteins.',
    long_description='Tool to cluster proteins using Galpering method.',
    scripts=['bin/clusperin'],
    packages=['clusperin'],
    platforms='Linux',
    url='http://bioinfoteam.fiocruz.br/clusperin',
    install_requires=[],
)
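# Build/usage sketch (standard setuptools workflow, not project-specific docs):
#   pip install .            # installs the 'clusperin' package
#   python setup.py sdist    # builds a source distribution
# after which the 'bin/clusperin' script is available on PATH.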
|
[
"from setuptools import setup\nfrom setuptools.command.install import install\nfrom os.path import expanduser\nfrom shutil import copyfile\n\nsetup(\n name='Clusperin',\n version='0.1',\n author='Franca AF (Alexander da Franca Fernandes)',\n author_email='[email protected]',\n license='BSD',\n description='Tool to cluster proteins.',\n long_description='Tool to cluster proteins using Galpering method.',\n scripts=['bin/clusperin'],\n packages=[ 'clusperin' ],\n platforms='Linux',\n url='http://bioinfoteam.fiocruz.br/clusperin',\n install_requires=[ ],\n)\n\n\n",
"from setuptools import setup\nfrom setuptools.command.install import install\nfrom os.path import expanduser\nfrom shutil import copyfile\nsetup(name='Clusperin', version='0.1', author=\n 'Franca AF (Alexander da Franca Fernandes)', author_email=\n '[email protected]', license='BSD', description=\n 'Tool to cluster proteins.', long_description=\n 'Tool to cluster proteins using Galpering method.', scripts=[\n 'bin/clusperin'], packages=['clusperin'], platforms='Linux', url=\n 'http://bioinfoteam.fiocruz.br/clusperin', install_requires=[])\n",
"<import token>\nsetup(name='Clusperin', version='0.1', author=\n 'Franca AF (Alexander da Franca Fernandes)', author_email=\n '[email protected]', license='BSD', description=\n 'Tool to cluster proteins.', long_description=\n 'Tool to cluster proteins using Galpering method.', scripts=[\n 'bin/clusperin'], packages=['clusperin'], platforms='Linux', url=\n 'http://bioinfoteam.fiocruz.br/clusperin', install_requires=[])\n",
"<import token>\n<code token>\n"
] | false |
98,409 |
110be29d581283406cb9c4d30f284c6157abb313
|
import numpy as np
W1 = np.zeros([4, 3])
W2 = np.zeros([4, 4])
W3 = np.zeros([1, 4])
b1 = np.zeros([4, 1])
b2 = np.zeros([4, 1])
b3 = np.zeros([1, 1])
# forward-pass of a 3-layer neural network:
f = lambda x: 1.0 / (1.0 + np.exp(-x)) # activation function (use sigmoid)
x = np.random.randn(3, 1) # random input vector of three numbers (3x1)
hidden_layer_1 = f(np.dot(W1, x) + b1) # calculate first hidden layer activations (4x1)
hidden_layer_2 = f(np.dot(W2, hidden_layer_1) + b2) # calculate second hidden layer activations (4x1)
output_layer = np.dot(W3, hidden_layer_2) + b3 # output neuron (1x1)
print(x)
print(f(x))
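# Sanity check (a sketch, not in the original): with all-zero weights and
# biases every pre-activation is 0 and sigmoid(0) = 0.5, so both hidden
# layers are vectors of 0.5 and the output collapses to b3 = 0.
assert output_layer.item() == 0.0
print(hidden_layer_1)  # every entry is 0.5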
|
[
"import numpy as np\n\nW1 = np.zeros([4, 3])\nW2 = np.zeros([4, 4])\nW3 = np.zeros([1, 4])\nb1 = np.zeros([4, 1])\nb2 = np.zeros([4, 1])\nb3 = np.zeros([1, 1])\n\n# forward-pass of a 3-layer neural network:\nf = lambda x: 1.0 / (1.0 + np.exp(-x)) # activation function (use sigmoid)\nx = np.random.randn(3, 1) # random input vector of three numbers (3x1)\nhidden_layer_1 = f(np.dot(W1, x) + b1) # calculate first hidden layer activations (4x1)\nhidden_layer_2 = f(np.dot(W2, hidden_layer_1) + b2) # calculate second hidden layer activations (4x1)\noutput_layer = np.dot(W3, hidden_layer_2) + b3 # output neuron (1x1)\nprint(x)\nprint(f(x))\n",
"import numpy as np\nW1 = np.zeros([4, 3])\nW2 = np.zeros([4, 4])\nW3 = np.zeros([1, 4])\nb1 = np.zeros([4, 1])\nb2 = np.zeros([4, 1])\nb3 = np.zeros([1, 1])\nf = lambda x: 1.0 / (1.0 + np.exp(-x))\nx = np.random.randn(3, 1)\nhidden_layer_1 = f(np.dot(W1, x) + b1)\nhidden_layer_2 = f(np.dot(W2, hidden_layer_1) + b2)\noutput_layer = np.dot(W3, hidden_layer_2) + b3\nprint(x)\nprint(f(x))\n",
"<import token>\nW1 = np.zeros([4, 3])\nW2 = np.zeros([4, 4])\nW3 = np.zeros([1, 4])\nb1 = np.zeros([4, 1])\nb2 = np.zeros([4, 1])\nb3 = np.zeros([1, 1])\nf = lambda x: 1.0 / (1.0 + np.exp(-x))\nx = np.random.randn(3, 1)\nhidden_layer_1 = f(np.dot(W1, x) + b1)\nhidden_layer_2 = f(np.dot(W2, hidden_layer_1) + b2)\noutput_layer = np.dot(W3, hidden_layer_2) + b3\nprint(x)\nprint(f(x))\n",
"<import token>\n<assignment token>\nprint(x)\nprint(f(x))\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,410 |
51e16f9520ab44f082ab5f2761c2117b6c36513e
|
from flask import Flask
from flask_api_starter.config import Config
def create_app(app_name):
app = Flask(app_name)
app.config.from_object(Config)
from flask_api_starter.apiv1 import apiv1
app.register_blueprint(apiv1, url_prefix='/api/v1')
return app
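if __name__ == '__main__':
    # Usage sketch (assumed entry point, not part of the original module):
    # create the app and serve it; apiv1 routes appear under /api/v1.
    app = create_app(__name__)
    app.run(debug=True)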
|
[
"from flask import Flask\nfrom flask_api_starter.config import Config\n\n\ndef create_app(app_name):\n app = Flask(app_name)\n\n app.config.from_object(Config)\n\n from flask_api_starter.apiv1 import apiv1\n app.register_blueprint(apiv1, url_prefix='/api/v1')\n\n return app\n",
"from flask import Flask\nfrom flask_api_starter.config import Config\n\n\ndef create_app(app_name):\n app = Flask(app_name)\n app.config.from_object(Config)\n from flask_api_starter.apiv1 import apiv1\n app.register_blueprint(apiv1, url_prefix='/api/v1')\n return app\n",
"<import token>\n\n\ndef create_app(app_name):\n app = Flask(app_name)\n app.config.from_object(Config)\n from flask_api_starter.apiv1 import apiv1\n app.register_blueprint(apiv1, url_prefix='/api/v1')\n return app\n",
"<import token>\n<function token>\n"
] | false |
98,411 |
827e97f95eadb5f11370d222a98b24d96bf0d0dc
|
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
import time
pub_key = """-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAMZ25lt7KbsuXJtiRqFYJeRoRf6BWZonlHYonIlOUQ/d58QL9gC/qzmH
IVkl6bNIMFp//Xjnfb4Sv6Lr7Rxab0PUNMND3N4fGcXOtBif2asS1aXWJ+UX8ofA
8eGrMNX9sCbGRFCYam+g6fYR8kmu8b0xhqnca7DMUrjCuv3JswHtAgMBAAE=
-----END RSA PUBLIC KEY-----"""
pub = RSA.importKey(pub_key)
cipher = PKCS1_v1_5.new(pub)
ts = str(time.time())[0:10]  # current Unix time, truncated to whole seconds
content = "time=" + ts + "&sign=megvii"
# PKCS#1 v1.5 RSA encryption is randomized, so the ciphertext hex differs per run
encrypt_hex = cipher.encrypt(content.encode(encoding='utf-8')).hex()
args = "time=" + ts + "&signature=" + encrypt_hex
print(args)
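# Decryption counterpart (sketch): only the holder of the matching private
# key can recover the plaintext. 'priv_key_pem' is a hypothetical
# placeholder -- the private key is not part of this file.
# priv = RSA.importKey(priv_key_pem)
# plain = PKCS1_v1_5.new(priv).decrypt(bytes.fromhex(encrypt_hex), None)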
|
[
"from Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_v1_5\nimport time\n\npub_key = \"\"\"-----BEGIN RSA PUBLIC KEY-----\nMIGJAoGBAMZ25lt7KbsuXJtiRqFYJeRoRf6BWZonlHYonIlOUQ/d58QL9gC/qzmH\nIVkl6bNIMFp//Xjnfb4Sv6Lr7Rxab0PUNMND3N4fGcXOtBif2asS1aXWJ+UX8ofA\n8eGrMNX9sCbGRFCYam+g6fYR8kmu8b0xhqnca7DMUrjCuv3JswHtAgMBAAE=\n-----END RSA PUBLIC KEY-----\"\"\"\n\n\n\n\npub = RSA.importKey(pub_key)\ncipher = PKCS1_v1_5.new(pub)\nts = str(time.time())[0:10]\ncontent = \"time=\"+ts+\"&sign=megvii\"\nencrypt_hex = cipher.encrypt(content.encode(encoding='utf-8')).hex()\nargs = \"time=\"+ts+\"&signature=\"+encrypt_hex\nprint(args)\n",
"from Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_v1_5\nimport time\npub_key = \"\"\"-----BEGIN RSA PUBLIC KEY-----\nMIGJAoGBAMZ25lt7KbsuXJtiRqFYJeRoRf6BWZonlHYonIlOUQ/d58QL9gC/qzmH\nIVkl6bNIMFp//Xjnfb4Sv6Lr7Rxab0PUNMND3N4fGcXOtBif2asS1aXWJ+UX8ofA\n8eGrMNX9sCbGRFCYam+g6fYR8kmu8b0xhqnca7DMUrjCuv3JswHtAgMBAAE=\n-----END RSA PUBLIC KEY-----\"\"\"\npub = RSA.importKey(pub_key)\ncipher = PKCS1_v1_5.new(pub)\nts = str(time.time())[0:10]\ncontent = 'time=' + ts + '&sign=megvii'\nencrypt_hex = cipher.encrypt(content.encode(encoding='utf-8')).hex()\nargs = 'time=' + ts + '&signature=' + encrypt_hex\nprint(args)\n",
"<import token>\npub_key = \"\"\"-----BEGIN RSA PUBLIC KEY-----\nMIGJAoGBAMZ25lt7KbsuXJtiRqFYJeRoRf6BWZonlHYonIlOUQ/d58QL9gC/qzmH\nIVkl6bNIMFp//Xjnfb4Sv6Lr7Rxab0PUNMND3N4fGcXOtBif2asS1aXWJ+UX8ofA\n8eGrMNX9sCbGRFCYam+g6fYR8kmu8b0xhqnca7DMUrjCuv3JswHtAgMBAAE=\n-----END RSA PUBLIC KEY-----\"\"\"\npub = RSA.importKey(pub_key)\ncipher = PKCS1_v1_5.new(pub)\nts = str(time.time())[0:10]\ncontent = 'time=' + ts + '&sign=megvii'\nencrypt_hex = cipher.encrypt(content.encode(encoding='utf-8')).hex()\nargs = 'time=' + ts + '&signature=' + encrypt_hex\nprint(args)\n",
"<import token>\n<assignment token>\nprint(args)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,412 |
b811d312471ad21c74b98f09a93821be9479d672
|
"""
Program: set_membership.py
Author: Dylan Thomas
Last date modified: 10/17/2020
"""
def in_set(user_set, item):
    """Return True if item is a member of user_set."""
    return item in user_set
if __name__ == '__main__':
pass
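    # Usage sketch (assumed sample values, not from the original file):
    print(in_set({'a', 'b'}, 'a'))  # True
    print(in_set({'a', 'b'}, 'z'))  # False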
|
[
"\"\"\"\nProgram: set_membership.py\nAuthor: Dylan Thomas\nLast date modified: 10/17/2020\n\"\"\"\n\ndef in_set(user_set, item):\n if item in user_set:\n return True\n else:\n return False\n\n\n\nif __name__ == '__main__':\n pass\n",
"<docstring token>\n\n\ndef in_set(user_set, item):\n if item in user_set:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n pass\n",
"<docstring token>\n\n\ndef in_set(user_set, item):\n if item in user_set:\n return True\n else:\n return False\n\n\n<code token>\n",
"<docstring token>\n<function token>\n<code token>\n"
] | false |
98,413 |
8bf0cf2cd9eedc7e9081a495f57f44bb4d241261
|
from module.typechanger import Type
class IPv4:
def __init__(self):
self.ip = str(input("Format: 127.0.0.1\nEnter IP Address: "))
self.ip_part = self.data = self.mask = None
def Validation_of_ip(self):
self.ip_part = self.ip.split('.', 4)
if len(self.ip_part) != 4:
return False
for i in self.ip_part:
if 255 < int(i) or int(i) < 0:
return False
return True
def Check_class(self):
if self.Validation_of_ip():
ip_first = int(self.ip_part[0])
if 0 <= ip_first <= 127:
Class = "A"
Application = "Unicast"
elif 128 <= ip_first <= 191:
Class = "B"
Application = "Unicast"
elif 192 <= ip_first <= 223:
Class = "C"
Application = "Unicast"
elif 224 <= ip_first <= 239:
Class = "D"
Application = "Multicast"
elif 240 <= ip_first <= 255:
Class = "E"
Application = "Reserved"
else:
return False
self.data = {
'ip': self.ip,
'class': Class,
'application': Application
}
return self.data
return False
def Subnet_mask(self):
data = self.Check_class()
if not data:
return False
if data['class'] == "A":
subnet = "255.0.0.0"
elif data['class'] == "B":
subnet = "255.255.0.0"
elif data['class'] == "C":
subnet = "255.255.255.0"
elif data['class'] == "D" or data['class'] == "E":
subnet = "255.255.255.255"
else:
return False
self.data['subnet'] = subnet
return self.data
    def Mask(self):
        data = self.Subnet_mask()
        if not data:
            return False
        self.mask = data['mask'] = int(input("Enter mask: /"))
        if self.mask < 0 or self.mask > 32:
            return False
        last_bits = 32 - self.mask
ip_binary_first = Type.Decimal_to_Binary(self.ip_part)
ip_binary_last = Type.Decimal_to_Binary(self.ip_part)
number_of_ip_list = []
ip_binary_first.reverse()
ip_binary_last.reverse()
for i in range(0, last_bits):
ip_binary_first[i] = 0
ip_binary_last[i] = 1
number_of_ip_list.append(1)
ip_binary_first.reverse()
ip_binary_last.reverse()
ip_binary_first = Type.Binary_to_Decimal(ip_binary_first)
ip_binary_last = Type.Binary_to_Decimal(ip_binary_last)
self.data['first_ip_address'] = ip_binary_first[0] + '.' + ip_binary_first[1] + '.' + ip_binary_first[2] + '.' + ip_binary_first[3]
self.data['last_ip_address'] = ip_binary_last[0] + '.' + ip_binary_last[1] + '.' + ip_binary_last[2] + '.' + ip_binary_last[3]
self.data['number_of_ip_addresses'] = Type.Binary_to_Decimal_int(number_of_ip_list) + 1
return self.data
    def print_ipv4(self):
        if not self.Mask():
            return False
        print("\n\tData Retrieved:\nGiven IP:", self.data['ip'])
print("Class:", self.data['class'], "\nApplication:", self.data['application'])
print("Subnet Mask:", self.data['subnet'], "\nMask:", self.data['mask'])
print("First IP of Network or Default Gateway:", self.data['first_ip_address'])
print("Last IP of Network:", self.data['last_ip_address'])
print("Total IPs in Network:", self.data['number_of_ip_addresses'])
|
[
"from module.typechanger import Type\n\n\nclass IPv4:\n def __init__(self):\n self.ip = str(input(\"Format: 127.0.0.1\\nEnter IP Address: \"))\n self.ip_part = self.data = self.mask = None\n\n def Validation_of_ip(self):\n self.ip_part = self.ip.split('.', 4)\n if len(self.ip_part) != 4:\n return False\n for i in self.ip_part:\n if 255 < int(i) or int(i) < 0:\n return False\n return True\n\n def Check_class(self):\n if self.Validation_of_ip():\n ip_first = int(self.ip_part[0])\n if 0 <= ip_first <= 127:\n Class = \"A\"\n Application = \"Unicast\"\n elif 128 <= ip_first <= 191:\n Class = \"B\"\n Application = \"Unicast\"\n elif 192 <= ip_first <= 223:\n Class = \"C\"\n Application = \"Unicast\"\n elif 224 <= ip_first <= 239:\n Class = \"D\"\n Application = \"Multicast\"\n elif 240 <= ip_first <= 255:\n Class = \"E\"\n Application = \"Reserved\"\n else:\n return False\n self.data = {\n 'ip': self.ip,\n 'class': Class,\n 'application': Application\n }\n return self.data\n return False\n\n def Subnet_mask(self):\n data = self.Check_class()\n if not data:\n return False\n if data['class'] == \"A\":\n subnet = \"255.0.0.0\"\n elif data['class'] == \"B\":\n subnet = \"255.255.0.0\"\n elif data['class'] == \"C\":\n subnet = \"255.255.255.0\"\n elif data['class'] == \"D\" or data['class'] == \"E\":\n subnet = \"255.255.255.255\"\n else:\n return False\n self.data['subnet'] = subnet\n return self.data\n\n def Mask(self):\n data = self.Subnet_mask()\n self.mask = data['mask'] = int(input(\"Enter mask: /\"))\n if self.mask < 0 or self.mask > 32:\n return False\n last_bits = 32 - self.mask\n\n ip_binary_first = Type.Decimal_to_Binary(self.ip_part)\n ip_binary_last = Type.Decimal_to_Binary(self.ip_part)\n number_of_ip_list = []\n ip_binary_first.reverse()\n ip_binary_last.reverse()\n\n for i in range(0, last_bits):\n ip_binary_first[i] = 0\n ip_binary_last[i] = 1\n number_of_ip_list.append(1)\n\n ip_binary_first.reverse()\n ip_binary_last.reverse()\n\n ip_binary_first = Type.Binary_to_Decimal(ip_binary_first)\n ip_binary_last = Type.Binary_to_Decimal(ip_binary_last)\n self.data['first_ip_address'] = ip_binary_first[0] + '.' + ip_binary_first[1] + '.' + ip_binary_first[2] + '.' + ip_binary_first[3]\n self.data['last_ip_address'] = ip_binary_last[0] + '.' + ip_binary_last[1] + '.' + ip_binary_last[2] + '.' + ip_binary_last[3]\n self.data['number_of_ip_addresses'] = Type.Binary_to_Decimal_int(number_of_ip_list) + 1\n return self.data\n\n def print_ipv4(self):\n if self.Mask() is None:\n return False\n print(\"\\n\\tData Retrived:\\nGiven IP:\", self.data['ip'])\n print(\"Class:\", self.data['class'], \"\\nApplication:\", self.data['application'])\n print(\"Subnet Mask:\", self.data['subnet'], \"\\nMask:\", self.data['mask'])\n print(\"First IP of Network or Default Gateway:\", self.data['first_ip_address'])\n print(\"Last IP of Network:\", self.data['last_ip_address'])\n print(\"Total IPs in Network:\", self.data['number_of_ip_addresses'])\n\n",
"from module.typechanger import Type\n\n\nclass IPv4:\n\n def __init__(self):\n self.ip = str(input('Format: 127.0.0.1\\nEnter IP Address: '))\n self.ip_part = self.data = self.mask = None\n\n def Validation_of_ip(self):\n self.ip_part = self.ip.split('.', 4)\n if len(self.ip_part) != 4:\n return False\n for i in self.ip_part:\n if 255 < int(i) or int(i) < 0:\n return False\n return True\n\n def Check_class(self):\n if self.Validation_of_ip():\n ip_first = int(self.ip_part[0])\n if 0 <= ip_first <= 127:\n Class = 'A'\n Application = 'Unicast'\n elif 128 <= ip_first <= 191:\n Class = 'B'\n Application = 'Unicast'\n elif 192 <= ip_first <= 223:\n Class = 'C'\n Application = 'Unicast'\n elif 224 <= ip_first <= 239:\n Class = 'D'\n Application = 'Multicast'\n elif 240 <= ip_first <= 255:\n Class = 'E'\n Application = 'Reserved'\n else:\n return False\n self.data = {'ip': self.ip, 'class': Class, 'application':\n Application}\n return self.data\n return False\n\n def Subnet_mask(self):\n data = self.Check_class()\n if not data:\n return False\n if data['class'] == 'A':\n subnet = '255.0.0.0'\n elif data['class'] == 'B':\n subnet = '255.255.0.0'\n elif data['class'] == 'C':\n subnet = '255.255.255.0'\n elif data['class'] == 'D' or data['class'] == 'E':\n subnet = '255.255.255.255'\n else:\n return False\n self.data['subnet'] = subnet\n return self.data\n\n def Mask(self):\n data = self.Subnet_mask()\n self.mask = data['mask'] = int(input('Enter mask: /'))\n if self.mask < 0 or self.mask > 32:\n return False\n last_bits = 32 - self.mask\n ip_binary_first = Type.Decimal_to_Binary(self.ip_part)\n ip_binary_last = Type.Decimal_to_Binary(self.ip_part)\n number_of_ip_list = []\n ip_binary_first.reverse()\n ip_binary_last.reverse()\n for i in range(0, last_bits):\n ip_binary_first[i] = 0\n ip_binary_last[i] = 1\n number_of_ip_list.append(1)\n ip_binary_first.reverse()\n ip_binary_last.reverse()\n ip_binary_first = Type.Binary_to_Decimal(ip_binary_first)\n ip_binary_last = Type.Binary_to_Decimal(ip_binary_last)\n self.data['first_ip_address'] = ip_binary_first[0\n ] + '.' + ip_binary_first[1] + '.' + ip_binary_first[2\n ] + '.' + ip_binary_first[3]\n self.data['last_ip_address'] = ip_binary_last[0\n ] + '.' + ip_binary_last[1] + '.' + ip_binary_last[2\n ] + '.' + ip_binary_last[3]\n self.data['number_of_ip_addresses'] = Type.Binary_to_Decimal_int(\n number_of_ip_list) + 1\n return self.data\n\n def print_ipv4(self):\n if self.Mask() is None:\n return False\n print('\\n\\tData Retrived:\\nGiven IP:', self.data['ip'])\n print('Class:', self.data['class'], '\\nApplication:', self.data[\n 'application'])\n print('Subnet Mask:', self.data['subnet'], '\\nMask:', self.data['mask']\n )\n print('First IP of Network or Default Gateway:', self.data[\n 'first_ip_address'])\n print('Last IP of Network:', self.data['last_ip_address'])\n print('Total IPs in Network:', self.data['number_of_ip_addresses'])\n",
"<import token>\n\n\nclass IPv4:\n\n def __init__(self):\n self.ip = str(input('Format: 127.0.0.1\\nEnter IP Address: '))\n self.ip_part = self.data = self.mask = None\n\n def Validation_of_ip(self):\n self.ip_part = self.ip.split('.', 4)\n if len(self.ip_part) != 4:\n return False\n for i in self.ip_part:\n if 255 < int(i) or int(i) < 0:\n return False\n return True\n\n def Check_class(self):\n if self.Validation_of_ip():\n ip_first = int(self.ip_part[0])\n if 0 <= ip_first <= 127:\n Class = 'A'\n Application = 'Unicast'\n elif 128 <= ip_first <= 191:\n Class = 'B'\n Application = 'Unicast'\n elif 192 <= ip_first <= 223:\n Class = 'C'\n Application = 'Unicast'\n elif 224 <= ip_first <= 239:\n Class = 'D'\n Application = 'Multicast'\n elif 240 <= ip_first <= 255:\n Class = 'E'\n Application = 'Reserved'\n else:\n return False\n self.data = {'ip': self.ip, 'class': Class, 'application':\n Application}\n return self.data\n return False\n\n def Subnet_mask(self):\n data = self.Check_class()\n if not data:\n return False\n if data['class'] == 'A':\n subnet = '255.0.0.0'\n elif data['class'] == 'B':\n subnet = '255.255.0.0'\n elif data['class'] == 'C':\n subnet = '255.255.255.0'\n elif data['class'] == 'D' or data['class'] == 'E':\n subnet = '255.255.255.255'\n else:\n return False\n self.data['subnet'] = subnet\n return self.data\n\n def Mask(self):\n data = self.Subnet_mask()\n self.mask = data['mask'] = int(input('Enter mask: /'))\n if self.mask < 0 or self.mask > 32:\n return False\n last_bits = 32 - self.mask\n ip_binary_first = Type.Decimal_to_Binary(self.ip_part)\n ip_binary_last = Type.Decimal_to_Binary(self.ip_part)\n number_of_ip_list = []\n ip_binary_first.reverse()\n ip_binary_last.reverse()\n for i in range(0, last_bits):\n ip_binary_first[i] = 0\n ip_binary_last[i] = 1\n number_of_ip_list.append(1)\n ip_binary_first.reverse()\n ip_binary_last.reverse()\n ip_binary_first = Type.Binary_to_Decimal(ip_binary_first)\n ip_binary_last = Type.Binary_to_Decimal(ip_binary_last)\n self.data['first_ip_address'] = ip_binary_first[0\n ] + '.' + ip_binary_first[1] + '.' + ip_binary_first[2\n ] + '.' + ip_binary_first[3]\n self.data['last_ip_address'] = ip_binary_last[0\n ] + '.' + ip_binary_last[1] + '.' + ip_binary_last[2\n ] + '.' + ip_binary_last[3]\n self.data['number_of_ip_addresses'] = Type.Binary_to_Decimal_int(\n number_of_ip_list) + 1\n return self.data\n\n def print_ipv4(self):\n if self.Mask() is None:\n return False\n print('\\n\\tData Retrived:\\nGiven IP:', self.data['ip'])\n print('Class:', self.data['class'], '\\nApplication:', self.data[\n 'application'])\n print('Subnet Mask:', self.data['subnet'], '\\nMask:', self.data['mask']\n )\n print('First IP of Network or Default Gateway:', self.data[\n 'first_ip_address'])\n print('Last IP of Network:', self.data['last_ip_address'])\n print('Total IPs in Network:', self.data['number_of_ip_addresses'])\n",
"<import token>\n\n\nclass IPv4:\n\n def __init__(self):\n self.ip = str(input('Format: 127.0.0.1\\nEnter IP Address: '))\n self.ip_part = self.data = self.mask = None\n\n def Validation_of_ip(self):\n self.ip_part = self.ip.split('.', 4)\n if len(self.ip_part) != 4:\n return False\n for i in self.ip_part:\n if 255 < int(i) or int(i) < 0:\n return False\n return True\n\n def Check_class(self):\n if self.Validation_of_ip():\n ip_first = int(self.ip_part[0])\n if 0 <= ip_first <= 127:\n Class = 'A'\n Application = 'Unicast'\n elif 128 <= ip_first <= 191:\n Class = 'B'\n Application = 'Unicast'\n elif 192 <= ip_first <= 223:\n Class = 'C'\n Application = 'Unicast'\n elif 224 <= ip_first <= 239:\n Class = 'D'\n Application = 'Multicast'\n elif 240 <= ip_first <= 255:\n Class = 'E'\n Application = 'Reserved'\n else:\n return False\n self.data = {'ip': self.ip, 'class': Class, 'application':\n Application}\n return self.data\n return False\n\n def Subnet_mask(self):\n data = self.Check_class()\n if not data:\n return False\n if data['class'] == 'A':\n subnet = '255.0.0.0'\n elif data['class'] == 'B':\n subnet = '255.255.0.0'\n elif data['class'] == 'C':\n subnet = '255.255.255.0'\n elif data['class'] == 'D' or data['class'] == 'E':\n subnet = '255.255.255.255'\n else:\n return False\n self.data['subnet'] = subnet\n return self.data\n <function token>\n\n def print_ipv4(self):\n if self.Mask() is None:\n return False\n print('\\n\\tData Retrived:\\nGiven IP:', self.data['ip'])\n print('Class:', self.data['class'], '\\nApplication:', self.data[\n 'application'])\n print('Subnet Mask:', self.data['subnet'], '\\nMask:', self.data['mask']\n )\n print('First IP of Network or Default Gateway:', self.data[\n 'first_ip_address'])\n print('Last IP of Network:', self.data['last_ip_address'])\n print('Total IPs in Network:', self.data['number_of_ip_addresses'])\n",
"<import token>\n\n\nclass IPv4:\n\n def __init__(self):\n self.ip = str(input('Format: 127.0.0.1\\nEnter IP Address: '))\n self.ip_part = self.data = self.mask = None\n\n def Validation_of_ip(self):\n self.ip_part = self.ip.split('.', 4)\n if len(self.ip_part) != 4:\n return False\n for i in self.ip_part:\n if 255 < int(i) or int(i) < 0:\n return False\n return True\n\n def Check_class(self):\n if self.Validation_of_ip():\n ip_first = int(self.ip_part[0])\n if 0 <= ip_first <= 127:\n Class = 'A'\n Application = 'Unicast'\n elif 128 <= ip_first <= 191:\n Class = 'B'\n Application = 'Unicast'\n elif 192 <= ip_first <= 223:\n Class = 'C'\n Application = 'Unicast'\n elif 224 <= ip_first <= 239:\n Class = 'D'\n Application = 'Multicast'\n elif 240 <= ip_first <= 255:\n Class = 'E'\n Application = 'Reserved'\n else:\n return False\n self.data = {'ip': self.ip, 'class': Class, 'application':\n Application}\n return self.data\n return False\n <function token>\n <function token>\n\n def print_ipv4(self):\n if self.Mask() is None:\n return False\n print('\\n\\tData Retrived:\\nGiven IP:', self.data['ip'])\n print('Class:', self.data['class'], '\\nApplication:', self.data[\n 'application'])\n print('Subnet Mask:', self.data['subnet'], '\\nMask:', self.data['mask']\n )\n print('First IP of Network or Default Gateway:', self.data[\n 'first_ip_address'])\n print('Last IP of Network:', self.data['last_ip_address'])\n print('Total IPs in Network:', self.data['number_of_ip_addresses'])\n",
"<import token>\n\n\nclass IPv4:\n\n def __init__(self):\n self.ip = str(input('Format: 127.0.0.1\\nEnter IP Address: '))\n self.ip_part = self.data = self.mask = None\n\n def Validation_of_ip(self):\n self.ip_part = self.ip.split('.', 4)\n if len(self.ip_part) != 4:\n return False\n for i in self.ip_part:\n if 255 < int(i) or int(i) < 0:\n return False\n return True\n\n def Check_class(self):\n if self.Validation_of_ip():\n ip_first = int(self.ip_part[0])\n if 0 <= ip_first <= 127:\n Class = 'A'\n Application = 'Unicast'\n elif 128 <= ip_first <= 191:\n Class = 'B'\n Application = 'Unicast'\n elif 192 <= ip_first <= 223:\n Class = 'C'\n Application = 'Unicast'\n elif 224 <= ip_first <= 239:\n Class = 'D'\n Application = 'Multicast'\n elif 240 <= ip_first <= 255:\n Class = 'E'\n Application = 'Reserved'\n else:\n return False\n self.data = {'ip': self.ip, 'class': Class, 'application':\n Application}\n return self.data\n return False\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass IPv4:\n <function token>\n\n def Validation_of_ip(self):\n self.ip_part = self.ip.split('.', 4)\n if len(self.ip_part) != 4:\n return False\n for i in self.ip_part:\n if 255 < int(i) or int(i) < 0:\n return False\n return True\n\n def Check_class(self):\n if self.Validation_of_ip():\n ip_first = int(self.ip_part[0])\n if 0 <= ip_first <= 127:\n Class = 'A'\n Application = 'Unicast'\n elif 128 <= ip_first <= 191:\n Class = 'B'\n Application = 'Unicast'\n elif 192 <= ip_first <= 223:\n Class = 'C'\n Application = 'Unicast'\n elif 224 <= ip_first <= 239:\n Class = 'D'\n Application = 'Multicast'\n elif 240 <= ip_first <= 255:\n Class = 'E'\n Application = 'Reserved'\n else:\n return False\n self.data = {'ip': self.ip, 'class': Class, 'application':\n Application}\n return self.data\n return False\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass IPv4:\n <function token>\n\n def Validation_of_ip(self):\n self.ip_part = self.ip.split('.', 4)\n if len(self.ip_part) != 4:\n return False\n for i in self.ip_part:\n if 255 < int(i) or int(i) < 0:\n return False\n return True\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass IPv4:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,414 |
aaf190b6f518cebee4da39860e9776a24fa4643c
|
from django.db.models import Count
from usaspending_api.awards.models import TransactionNormalized
def find_related_awards(transactions):
related_award_ids = transactions.values_list("award_id", flat=True)
tn_count = (
TransactionNormalized.objects.filter(award_id__in=related_award_ids)
.values("award_id")
.annotate(transaction_count=Count("id"))
.values_list("award_id", "transaction_count")
)
tn_count_filtered = (
transactions.values("award_id")
.annotate(transaction_count=Count("id"))
.values_list("award_id", "transaction_count")
)
tn_count_mapping = dict(tn_count)
tn_count_filtered_mapping = dict(tn_count_filtered)
# only delete awards if and only if all their transactions are deleted, otherwise update the award
update_awards = [
award_id
for award_id, transaction_count in tn_count_mapping.items()
if tn_count_filtered_mapping[award_id] != transaction_count
]
delete_awards = [
award_id
for award_id, transaction_count in tn_count_mapping.items()
if tn_count_filtered_mapping[award_id] == transaction_count
]
return update_awards, delete_awards
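# Usage sketch (hypothetical caller -- not part of this module): given any
# queryset of TransactionNormalized rows slated for deletion, e.g.
#   transactions = TransactionNormalized.objects.filter(id__in=doomed_ids)
#   update_awards, delete_awards = find_related_awards(transactions)
# awards in delete_awards would lose *all* their transactions, so they can
# be dropped outright; those in update_awards only need recomputation.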
|
[
"from django.db.models import Count\nfrom usaspending_api.awards.models import TransactionNormalized\n\n\ndef find_related_awards(transactions):\n related_award_ids = transactions.values_list(\"award_id\", flat=True)\n tn_count = (\n TransactionNormalized.objects.filter(award_id__in=related_award_ids)\n .values(\"award_id\")\n .annotate(transaction_count=Count(\"id\"))\n .values_list(\"award_id\", \"transaction_count\")\n )\n tn_count_filtered = (\n transactions.values(\"award_id\")\n .annotate(transaction_count=Count(\"id\"))\n .values_list(\"award_id\", \"transaction_count\")\n )\n tn_count_mapping = dict(tn_count)\n tn_count_filtered_mapping = dict(tn_count_filtered)\n # only delete awards if and only if all their transactions are deleted, otherwise update the award\n update_awards = [\n award_id\n for award_id, transaction_count in tn_count_mapping.items()\n if tn_count_filtered_mapping[award_id] != transaction_count\n ]\n delete_awards = [\n award_id\n for award_id, transaction_count in tn_count_mapping.items()\n if tn_count_filtered_mapping[award_id] == transaction_count\n ]\n return update_awards, delete_awards\n",
"from django.db.models import Count\nfrom usaspending_api.awards.models import TransactionNormalized\n\n\ndef find_related_awards(transactions):\n related_award_ids = transactions.values_list('award_id', flat=True)\n tn_count = TransactionNormalized.objects.filter(award_id__in=\n related_award_ids).values('award_id').annotate(transaction_count=\n Count('id')).values_list('award_id', 'transaction_count')\n tn_count_filtered = transactions.values('award_id').annotate(\n transaction_count=Count('id')).values_list('award_id',\n 'transaction_count')\n tn_count_mapping = dict(tn_count)\n tn_count_filtered_mapping = dict(tn_count_filtered)\n update_awards = [award_id for award_id, transaction_count in\n tn_count_mapping.items() if tn_count_filtered_mapping[award_id] !=\n transaction_count]\n delete_awards = [award_id for award_id, transaction_count in\n tn_count_mapping.items() if tn_count_filtered_mapping[award_id] ==\n transaction_count]\n return update_awards, delete_awards\n",
"<import token>\n\n\ndef find_related_awards(transactions):\n related_award_ids = transactions.values_list('award_id', flat=True)\n tn_count = TransactionNormalized.objects.filter(award_id__in=\n related_award_ids).values('award_id').annotate(transaction_count=\n Count('id')).values_list('award_id', 'transaction_count')\n tn_count_filtered = transactions.values('award_id').annotate(\n transaction_count=Count('id')).values_list('award_id',\n 'transaction_count')\n tn_count_mapping = dict(tn_count)\n tn_count_filtered_mapping = dict(tn_count_filtered)\n update_awards = [award_id for award_id, transaction_count in\n tn_count_mapping.items() if tn_count_filtered_mapping[award_id] !=\n transaction_count]\n delete_awards = [award_id for award_id, transaction_count in\n tn_count_mapping.items() if tn_count_filtered_mapping[award_id] ==\n transaction_count]\n return update_awards, delete_awards\n",
"<import token>\n<function token>\n"
] | false |
98,415 |
12c734ce16676186aa1716ecd4679130e71dbc7d
|
#!/usr/bin/env python3
from utils.irc import IRCHandler
from utils.threads import HandlerThread
from utils.events import Event
from utils.events import Standard as StandardEvents
from utils.sstate import DotDict
from utils.decorators import IRCCallback
from utils import now
import sys
import json
import threading
import time
import ssl
import plugins
from plugins.auth import User
VERSION = "1.0.0"
class Infobot(IRCHandler):
""" Infobot main class """
def __init__(self, config):
print("Infobot version %s" % (VERSION))
super().__init__(config, verbose=False)
self.config = config
self.nick = config["nick"]
# Arbitrary bot data
self.data = {}
self.auth = None
self.lock = threading.Lock()
self.lock.acquire()
self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0])
for i in StandardEvents})
self.cmd_thread = HandlerThread(self, self.lock)
self.cmd_thread.daemon = True
self.register_callbacks()
self.register_plugins(plugins.get_plugins())
for item in self.config["admins"]:
self.auth.addadmin(User(item[0], item[1], item[2]))
def __repr__(self):
return "Infobot(server=%r)" % (self.config["server"].split(':')[0])
def register_callback(self, ctype, func):
if ctype in self.__irccallbacks__:
self.__irccallbacks__[ctype].append(func)
else:
self.__irccallbacks__[ctype] = [func]
def _msg(self, chan, msg):
self.sock.send(b"PRIVMSG ")
self.sock.send(("%s :%s" % (chan, msg)).encode('utf-8'))
self.sock.send(b"\r\n")
def notice(self, chan, msg):
self.sock.send(b"NOTICE ")
self.sock.send(("%s :%s" % (chan, msg)).encode('utf-8'))
self.sock.send(b"\r\n")
    def msg(self, chan, msg):
        """ Send a message to a channel, splitting on newlines. """
        msg = str(msg).replace("\r", "")
        if '\n' in msg:
            for item in msg.split("\n"):
                self._msg(chan, item)
        else:
            self._msg(chan, msg)
@IRCCallback("INVITE")
def handleinvite(self, pmsg):
bot._send("JOIN :" + pmsg["arg"].split(":")[1])
def switch(self):
self.lock.release()
time.sleep(0.01)
self.lock.acquire()
@IRCCallback("MODE")
def mode(self, msg):
""" Handle MODE. """
if not msg["arg"].startswith("#"):
self.nick = msg["arg"].split(" ", 1)[0]
def connect(self):
self.cmd_thread.start()
if self.config["ssl"]:
self.sock = ssl.wrap_socket(self.sock)
super().connect()
@IRCCallback("PRIVMSG")
def privmsg(self, msg):
""" Handles messages. """
nick = msg["host"].split('!')[0]
chan = msg["arg"].split()[0]
chan = chan.lower()
if not chan.startswith("#"):
# Private message. File under sender.
chan = nick
msg = msg["arg"].split(":", 1)[1]
self.events.MessageEvent.fire(self, nick, chan, msg)
print("[main thread:%s] [%s] <%s> %s" % (now(), chan, nick, msg))
@IRCCallback("NOTICE")
def notice_listener(self, msg):
sys.__stdout__.write("[main thread:%s] *%s* %s\n" % (now(), msg["host"], msg["arg"].split(" ", 1)[1][1:]))
sys.__stdout__.flush()
def register_plugins(self, plugins):
for plugin in plugins:
print("[main thread:%s] processing plugin %s" % (now(), plugin.__file__))
if hasattr(plugin, "__callbacks__"):
for k, v in plugin.__callbacks__.items():
for i in v:
try:
self.__irccallbacks__[k].append(i)
except KeyError:
self.__irccallbacks__[k] = [i]
if hasattr(plugin, "__inits__"):
for init in plugin.__inits__:
init(self)
@IRCCallback("376", "422")
def welcome(self, msg):
if self.config.get("ident_pass", None):
self._msg(self.config["ident_service"], "identify %s"
% (self.config["ident_pass"]))
self._send("MODE %s +B" % (self.nick))
for channel in self.config["autojoin"]:
self._send("JOIN :%s" % (channel))
def gracefully_terminate(self):
super().gracefully_terminate()
if __name__ == "__main__":
config = json.load(open("config.json", "r"))
bot = Infobot(config)
bot.connect()
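    # config.json sketch (keys inferred from the reads above; values are
    # illustrative placeholders, not taken from the original project):
    # {
    #   "nick": "infobot",
    #   "server": "irc.example.net:6697",
    #   "ssl": true,
    #   "ident_service": "NickServ",
    #   "ident_pass": "example-password",
    #   "autojoin": ["#example"],
    #   "admins": [["nick", "user", "host.example.net"]]
    # }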
|
[
"#!/usr/bin/env python3\nfrom utils.irc import IRCHandler\nfrom utils.threads import HandlerThread\nfrom utils.events import Event\nfrom utils.events import Standard as StandardEvents\nfrom utils.sstate import DotDict\nfrom utils.decorators import IRCCallback\nfrom utils import now\n\nimport re\nimport traceback\nimport sys\nimport json\nimport threading\nimport datetime\nimport time\nimport ssl\n\nimport plugins\nfrom plugins.auth import User\n\nVERSION = \"1.0.0\"\n\nclass Infobot(IRCHandler):\n \"\"\" Infobot main class \"\"\"\n def __init__(self, config):\n print(\"Infobot version %s\" % (VERSION))\n super().__init__(config, verbose=False)\n\n self.config = config\n self.nick = config[\"nick\"]\n\n # Arbitrary bot data\n self.data = {}\n self.auth = None\n\n self.lock = threading.Lock()\n self.lock.acquire()\n\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0])\n for i in StandardEvents})\n\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n\n for item in self.config[\"admins\"]:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n\n def __repr__(self):\n return \"Infobot(server=%r)\" % (self.config[\"server\"].split(':')[0])\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b\"PRIVMSG \")\n self.sock.send((\"%s :%s\" % (chan, msg)).encode('utf-8'))\n self.sock.send(b\"\\r\\n\")\n\n def notice(self, chan, msg):\n self.sock.send(b\"NOTICE \")\n self.sock.send((\"%s :%s\" % (chan, msg)).encode('utf-8'))\n self.sock.send(b\"\\r\\n\")\n\n def msg(self, chan, msg):\n msg = str(msg).replace(\"\\r\", \"\")\n if '\\n' in msg:\n for item in msg.split(\"\\n\"):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback(\"INVITE\")\n def handleinvite(self, pmsg):\n bot._send(\"JOIN :\" + pmsg[\"arg\"].split(\":\")[1])\n\n def switch(self):\n self.lock.release()\n time.sleep(0.01)\n self.lock.acquire()\n\n @IRCCallback(\"MODE\")\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg[\"arg\"].startswith(\"#\"):\n self.nick = msg[\"arg\"].split(\" \", 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config[\"ssl\"]:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n\n def msg(self, chan, msg):\n \"\"\" Send a message to a channel. \"\"\"\n self._msg(chan, msg)\n\n @IRCCallback(\"PRIVMSG\")\n def privmsg(self, msg):\n \"\"\" Handles messages. \"\"\"\n nick = msg[\"host\"].split('!')[0]\n chan = msg[\"arg\"].split()[0]\n chan = chan.lower()\n if not chan.startswith(\"#\"):\n # Private message. 
File under sender.\n chan = nick\n msg = msg[\"arg\"].split(\":\", 1)[1]\n\n self.events.MessageEvent.fire(self, nick, chan, msg)\n print(\"[main thread:%s] [%s] <%s> %s\" % (now(), chan, nick, msg))\n\n @IRCCallback(\"NOTICE\")\n def notice_listener(self, msg):\n sys.__stdout__.write(\"[main thread:%s] *%s* %s\\n\" % (now(), msg[\"host\"], msg[\"arg\"].split(\" \", 1)[1][1:]))\n sys.__stdout__.flush()\n\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print(\"[main thread:%s] processing plugin %s\" % (now(), plugin.__file__))\n if hasattr(plugin, \"__callbacks__\"):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, \"__inits__\"):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback(\"376\", \"422\")\n def welcome(self, msg):\n if self.config.get(\"ident_pass\", None):\n self._msg(self.config[\"ident_service\"], \"identify %s\"\n % (self.config[\"ident_pass\"]))\n self._send(\"MODE %s +B\" % (self.nick))\n\n for channel in self.config[\"autojoin\"]:\n self._send(\"JOIN :%s\" % (channel))\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\nif __name__ == \"__main__\":\n config = json.load(open(\"config.json\", \"r\"))\n bot = Infobot(config)\n bot.connect()\n",
"from utils.irc import IRCHandler\nfrom utils.threads import HandlerThread\nfrom utils.events import Event\nfrom utils.events import Standard as StandardEvents\nfrom utils.sstate import DotDict\nfrom utils.decorators import IRCCallback\nfrom utils import now\nimport re\nimport traceback\nimport sys\nimport json\nimport threading\nimport datetime\nimport time\nimport ssl\nimport plugins\nfrom plugins.auth import User\nVERSION = '1.0.0'\n\n\nclass Infobot(IRCHandler):\n \"\"\" Infobot main class \"\"\"\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n\n def __repr__(self):\n return 'Infobot(server=%r)' % self.config['server'].split(':')[0]\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n\n def notice(self, chan, msg):\n self.sock.send(b'NOTICE ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n\n def switch(self):\n self.lock.release()\n time.sleep(0.01)\n self.lock.acquire()\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config['ssl']:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n\n def msg(self, chan, msg):\n \"\"\" Send a message to a channel. \"\"\"\n self._msg(chan, msg)\n\n @IRCCallback('PRIVMSG')\n def privmsg(self, msg):\n \"\"\" Handles messages. 
\"\"\"\n nick = msg['host'].split('!')[0]\n chan = msg['arg'].split()[0]\n chan = chan.lower()\n if not chan.startswith('#'):\n chan = nick\n msg = msg['arg'].split(':', 1)[1]\n self.events.MessageEvent.fire(self, nick, chan, msg)\n print('[main thread:%s] [%s] <%s> %s' % (now(), chan, nick, msg))\n\n @IRCCallback('NOTICE')\n def notice_listener(self, msg):\n sys.__stdout__.write('[main thread:%s] *%s* %s\\n' % (now(), msg[\n 'host'], msg['arg'].split(' ', 1)[1][1:]))\n sys.__stdout__.flush()\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\nif __name__ == '__main__':\n config = json.load(open('config.json', 'r'))\n bot = Infobot(config)\n bot.connect()\n",
"<import token>\nVERSION = '1.0.0'\n\n\nclass Infobot(IRCHandler):\n \"\"\" Infobot main class \"\"\"\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n\n def __repr__(self):\n return 'Infobot(server=%r)' % self.config['server'].split(':')[0]\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n\n def notice(self, chan, msg):\n self.sock.send(b'NOTICE ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n\n def switch(self):\n self.lock.release()\n time.sleep(0.01)\n self.lock.acquire()\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config['ssl']:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n\n def msg(self, chan, msg):\n \"\"\" Send a message to a channel. \"\"\"\n self._msg(chan, msg)\n\n @IRCCallback('PRIVMSG')\n def privmsg(self, msg):\n \"\"\" Handles messages. \"\"\"\n nick = msg['host'].split('!')[0]\n chan = msg['arg'].split()[0]\n chan = chan.lower()\n if not chan.startswith('#'):\n chan = nick\n msg = msg['arg'].split(':', 1)[1]\n self.events.MessageEvent.fire(self, nick, chan, msg)\n print('[main thread:%s] [%s] <%s> %s' % (now(), chan, nick, msg))\n\n @IRCCallback('NOTICE')\n def notice_listener(self, msg):\n sys.__stdout__.write('[main thread:%s] *%s* %s\\n' % (now(), msg[\n 'host'], msg['arg'].split(' ', 1)[1][1:]))\n sys.__stdout__.flush()\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\nif __name__ == '__main__':\n config = json.load(open('config.json', 'r'))\n bot = Infobot(config)\n bot.connect()\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n \"\"\" Infobot main class \"\"\"\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n\n def __repr__(self):\n return 'Infobot(server=%r)' % self.config['server'].split(':')[0]\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n\n def notice(self, chan, msg):\n self.sock.send(b'NOTICE ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n\n def switch(self):\n self.lock.release()\n time.sleep(0.01)\n self.lock.acquire()\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config['ssl']:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n\n def msg(self, chan, msg):\n \"\"\" Send a message to a channel. \"\"\"\n self._msg(chan, msg)\n\n @IRCCallback('PRIVMSG')\n def privmsg(self, msg):\n \"\"\" Handles messages. \"\"\"\n nick = msg['host'].split('!')[0]\n chan = msg['arg'].split()[0]\n chan = chan.lower()\n if not chan.startswith('#'):\n chan = nick\n msg = msg['arg'].split(':', 1)[1]\n self.events.MessageEvent.fire(self, nick, chan, msg)\n print('[main thread:%s] [%s] <%s> %s' % (now(), chan, nick, msg))\n\n @IRCCallback('NOTICE')\n def notice_listener(self, msg):\n sys.__stdout__.write('[main thread:%s] *%s* %s\\n' % (now(), msg[\n 'host'], msg['arg'].split(' ', 1)[1][1:]))\n sys.__stdout__.flush()\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\nif __name__ == '__main__':\n config = json.load(open('config.json', 'r'))\n bot = Infobot(config)\n bot.connect()\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n \"\"\" Infobot main class \"\"\"\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n\n def __repr__(self):\n return 'Infobot(server=%r)' % self.config['server'].split(':')[0]\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n\n def notice(self, chan, msg):\n self.sock.send(b'NOTICE ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n\n def switch(self):\n self.lock.release()\n time.sleep(0.01)\n self.lock.acquire()\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config['ssl']:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n\n def msg(self, chan, msg):\n \"\"\" Send a message to a channel. \"\"\"\n self._msg(chan, msg)\n\n @IRCCallback('PRIVMSG')\n def privmsg(self, msg):\n \"\"\" Handles messages. \"\"\"\n nick = msg['host'].split('!')[0]\n chan = msg['arg'].split()[0]\n chan = chan.lower()\n if not chan.startswith('#'):\n chan = nick\n msg = msg['arg'].split(':', 1)[1]\n self.events.MessageEvent.fire(self, nick, chan, msg)\n print('[main thread:%s] [%s] <%s> %s' % (now(), chan, nick, msg))\n\n @IRCCallback('NOTICE')\n def notice_listener(self, msg):\n sys.__stdout__.write('[main thread:%s] *%s* %s\\n' % (now(), msg[\n 'host'], msg['arg'].split(' ', 1)[1][1:]))\n sys.__stdout__.flush()\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n\n def __repr__(self):\n return 'Infobot(server=%r)' % self.config['server'].split(':')[0]\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n\n def notice(self, chan, msg):\n self.sock.send(b'NOTICE ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n\n def switch(self):\n self.lock.release()\n time.sleep(0.01)\n self.lock.acquire()\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config['ssl']:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n\n def msg(self, chan, msg):\n \"\"\" Send a message to a channel. \"\"\"\n self._msg(chan, msg)\n\n @IRCCallback('PRIVMSG')\n def privmsg(self, msg):\n \"\"\" Handles messages. \"\"\"\n nick = msg['host'].split('!')[0]\n chan = msg['arg'].split()[0]\n chan = chan.lower()\n if not chan.startswith('#'):\n chan = nick\n msg = msg['arg'].split(':', 1)[1]\n self.events.MessageEvent.fire(self, nick, chan, msg)\n print('[main thread:%s] [%s] <%s> %s' % (now(), chan, nick, msg))\n\n @IRCCallback('NOTICE')\n def notice_listener(self, msg):\n sys.__stdout__.write('[main thread:%s] *%s* %s\\n' % (now(), msg[\n 'host'], msg['arg'].split(' ', 1)[1][1:]))\n sys.__stdout__.flush()\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n\n def __repr__(self):\n return 'Infobot(server=%r)' % self.config['server'].split(':')[0]\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n\n def switch(self):\n self.lock.release()\n time.sleep(0.01)\n self.lock.acquire()\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config['ssl']:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n\n def msg(self, chan, msg):\n \"\"\" Send a message to a channel. \"\"\"\n self._msg(chan, msg)\n\n @IRCCallback('PRIVMSG')\n def privmsg(self, msg):\n \"\"\" Handles messages. \"\"\"\n nick = msg['host'].split('!')[0]\n chan = msg['arg'].split()[0]\n chan = chan.lower()\n if not chan.startswith('#'):\n chan = nick\n msg = msg['arg'].split(':', 1)[1]\n self.events.MessageEvent.fire(self, nick, chan, msg)\n print('[main thread:%s] [%s] <%s> %s' % (now(), chan, nick, msg))\n\n @IRCCallback('NOTICE')\n def notice_listener(self, msg):\n sys.__stdout__.write('[main thread:%s] *%s* %s\\n' % (now(), msg[\n 'host'], msg['arg'].split(' ', 1)[1][1:]))\n sys.__stdout__.flush()\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n\n def __repr__(self):\n return 'Infobot(server=%r)' % self.config['server'].split(':')[0]\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n <function token>\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config['ssl']:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n\n def msg(self, chan, msg):\n \"\"\" Send a message to a channel. \"\"\"\n self._msg(chan, msg)\n\n @IRCCallback('PRIVMSG')\n def privmsg(self, msg):\n \"\"\" Handles messages. \"\"\"\n nick = msg['host'].split('!')[0]\n chan = msg['arg'].split()[0]\n chan = chan.lower()\n if not chan.startswith('#'):\n chan = nick\n msg = msg['arg'].split(':', 1)[1]\n self.events.MessageEvent.fire(self, nick, chan, msg)\n print('[main thread:%s] [%s] <%s> %s' % (now(), chan, nick, msg))\n\n @IRCCallback('NOTICE')\n def notice_listener(self, msg):\n sys.__stdout__.write('[main thread:%s] *%s* %s\\n' % (now(), msg[\n 'host'], msg['arg'].split(' ', 1)[1][1:]))\n sys.__stdout__.flush()\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n\n def __repr__(self):\n return 'Infobot(server=%r)' % self.config['server'].split(':')[0]\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n <function token>\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config['ssl']:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n\n def msg(self, chan, msg):\n \"\"\" Send a message to a channel. \"\"\"\n self._msg(chan, msg)\n <function token>\n\n @IRCCallback('NOTICE')\n def notice_listener(self, msg):\n sys.__stdout__.write('[main thread:%s] *%s* %s\\n' % (now(), msg[\n 'host'], msg['arg'].split(' ', 1)[1][1:]))\n sys.__stdout__.flush()\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n <function token>\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n <function token>\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config['ssl']:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n\n def msg(self, chan, msg):\n \"\"\" Send a message to a channel. \"\"\"\n self._msg(chan, msg)\n <function token>\n\n @IRCCallback('NOTICE')\n def notice_listener(self, msg):\n sys.__stdout__.write('[main thread:%s] *%s* %s\\n' % (now(), msg[\n 'host'], msg['arg'].split(' ', 1)[1][1:]))\n sys.__stdout__.flush()\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n <function token>\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n <function token>\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config['ssl']:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n\n def msg(self, chan, msg):\n \"\"\" Send a message to a channel. \"\"\"\n self._msg(chan, msg)\n <function token>\n <function token>\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n <function token>\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n <function token>\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n\n def connect(self):\n self.cmd_thread.start()\n if self.config['ssl']:\n self.sock = ssl.wrap_socket(self.sock)\n super().connect()\n <function token>\n <function token>\n <function token>\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n <function token>\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n\n def msg(self, chan, msg):\n msg = str(msg).replace('\\r', '')\n if '\\n' in msg:\n for item in msg.split('\\n'):\n self._msg(chan, item)\n else:\n self._msg(chan, msg)\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n <function token>\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n <function token>\n <function token>\n <function token>\n <function token>\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n\n def __init__(self, config):\n print('Infobot version %s' % VERSION)\n super().__init__(config, verbose=False)\n self.config = config\n self.nick = config['nick']\n self.data = {}\n self.auth = None\n self.lock = threading.Lock()\n self.lock.acquire()\n self.events = DotDict({list(i.keys())[0]: Event(list(i.values())[0]\n ) for i in StandardEvents})\n self.cmd_thread = HandlerThread(self, self.lock)\n self.cmd_thread.daemon = True\n self.register_callbacks()\n self.register_plugins(plugins.get_plugins())\n for item in self.config['admins']:\n self.auth.addadmin(User(item[0], item[1], item[2]))\n <function token>\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n <function token>\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n <function token>\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n <function token>\n <function token>\n <function token>\n <function token>\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n <function token>\n <function token>\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n <function token>\n\n @IRCCallback('INVITE')\n def handleinvite(self, pmsg):\n bot._send('JOIN :' + pmsg['arg'].split(':')[1])\n <function token>\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n <function token>\n <function token>\n <function token>\n <function token>\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n <function token>\n <function token>\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n <function token>\n <function token>\n <function token>\n\n @IRCCallback('MODE')\n def mode(self, msg):\n \"\"\" Handle MODE. \"\"\"\n if not msg['arg'].startswith('#'):\n self.nick = msg['arg'].split(' ', 1)[0]\n <function token>\n <function token>\n <function token>\n <function token>\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n <function token>\n <function token>\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n\n def gracefully_terminate(self):\n super().gracefully_terminate()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n <function token>\n <function token>\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n\n def _msg(self, chan, msg):\n self.sock.send(b'PRIVMSG ')\n self.sock.send(('%s :%s' % (chan, msg)).encode('utf-8'))\n self.sock.send(b'\\r\\n')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n <function token>\n <function token>\n\n def register_callback(self, ctype, func):\n if ctype in self.__irccallbacks__:\n self.__irccallbacks__[ctype].append(func)\n else:\n self.__irccallbacks__[ctype] = [func]\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n\n @IRCCallback('376', '422')\n def welcome(self, msg):\n if self.config.get('ident_pass', None):\n self._msg(self.config['ident_service'], 'identify %s' % self.\n config['ident_pass'])\n self._send('MODE %s +B' % self.nick)\n for channel in self.config['autojoin']:\n self._send('JOIN :%s' % channel)\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def register_plugins(self, plugins):\n for plugin in plugins:\n print('[main thread:%s] processing plugin %s' % (now(), plugin.\n __file__))\n if hasattr(plugin, '__callbacks__'):\n for k, v in plugin.__callbacks__.items():\n for i in v:\n try:\n self.__irccallbacks__[k].append(i)\n except KeyError:\n self.__irccallbacks__[k] = [i]\n if hasattr(plugin, '__inits__'):\n for init in plugin.__inits__:\n init(self)\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Infobot(IRCHandler):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<code token>\n"
] | false |
98,416 |
3c5a43f592705c986bd53941dae3d8fca8899fc9
|
#------------------------------------------------------------------------------
# Copyright (c) 2008, Riverbank Computing Limited
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Riverbank Computing Limited
# Description: <Enthought permissions package component>
#------------------------------------------------------------------------------
# Enthought library imports.
from enthought.traits.api import HasTraits, Instance, List, Unicode
from enthought.traits.ui.api import Item, TableEditor, View
from enthought.traits.ui.menu import OKCancelButtons
from enthought.traits.ui.table_column import ObjectColumn


class _User(HasTraits):
"""This represents the user model."""
#### '_User' interface ####################################################
# The user name.
name = Unicode
# The user description.
    description = Unicode


class _UsersView(HasTraits):
"""This represents the view used to select a user."""
#### '_UsersView' interface ###############################################
# The list of users to select from.
model = List(_User)
# The selected user.
selection = Instance(_User)
# The editor used by the view.
table_editor = TableEditor(columns=[ObjectColumn(name='name'),
ObjectColumn(name='description')],
selected='selection', sort_model=True, configurable=False)
# The default view.
traits_view = View(Item('model', show_label=False, editor=table_editor),
title="Select a User", style='readonly', kind='modal',
        buttons=OKCancelButtons)


def select_user(users):
"""Return a single user from the given list of users."""
# Construct the model.
model = [_User(name=name, description=description)
for name, description in users]
# Construct the view.
view = _UsersView(model=model)
if view.configure_traits() and view.selection is not None:
user = view.selection.name, view.selection.description
else:
user = '', ''
return user
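
# A minimal usage sketch (not part of the original module; the user tuples are
# hypothetical). select_user() opens a modal table dialog and returns the
# chosen (name, description) pair, or ('', '') if the dialog is cancelled.
if __name__ == '__main__':
    example_users = [('alice', 'Project administrator'),
                     ('bob', 'Read-only reviewer')]
    name, description = select_user(example_users)
    print('Selected: %s (%s)' % (name, description))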
|
[
"#------------------------------------------------------------------------------\n# Copyright (c) 2008, Riverbank Computing Limited\n# All rights reserved.\n#\n# This software is provided without warranty under the terms of the BSD\n# license included in enthought/LICENSE.txt and may be redistributed only\n# under the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n# Thanks for using Enthought open source!\n#\n# Author: Riverbank Computing Limited\n# Description: <Enthought permissions package component>\n#------------------------------------------------------------------------------\n\n\n# Enthought library imports.\nfrom enthought.traits.api import HasTraits, Instance, List, Unicode\nfrom enthought.traits.ui.api import Item, TableEditor, View\nfrom enthought.traits.ui.menu import OKCancelButtons\nfrom enthought.traits.ui.table_column import ObjectColumn\n\n\nclass _User(HasTraits):\n \"\"\"This represents the user model.\"\"\"\n\n #### '_User' interface ####################################################\n\n # The user name.\n name = Unicode\n\n # The user description.\n description = Unicode\n\n\nclass _UsersView(HasTraits):\n \"\"\"This represents the view used to select a user.\"\"\"\n\n #### '_UsersView' interface ###############################################\n\n # The list of users to select from.\n model = List(_User)\n\n # The selected user.\n selection = Instance(_User)\n\n # The editor used by the view.\n table_editor = TableEditor(columns=[ObjectColumn(name='name'),\n ObjectColumn(name='description')],\n selected='selection', sort_model=True, configurable=False)\n\n # The default view.\n traits_view = View(Item('model', show_label=False, editor=table_editor),\n title=\"Select a User\", style='readonly', kind='modal',\n buttons=OKCancelButtons)\n\n\ndef select_user(users):\n \"\"\"Return a single user from the given list of users.\"\"\"\n\n # Construct the model.\n model = [_User(name=name, description=description)\n for name, description in users]\n\n # Construct the view.\n view = _UsersView(model=model)\n\n if view.configure_traits() and view.selection is not None:\n user = view.selection.name, view.selection.description\n else:\n user = '', ''\n\n return user\n",
"from enthought.traits.api import HasTraits, Instance, List, Unicode\nfrom enthought.traits.ui.api import Item, TableEditor, View\nfrom enthought.traits.ui.menu import OKCancelButtons\nfrom enthought.traits.ui.table_column import ObjectColumn\n\n\nclass _User(HasTraits):\n \"\"\"This represents the user model.\"\"\"\n name = Unicode\n description = Unicode\n\n\nclass _UsersView(HasTraits):\n \"\"\"This represents the view used to select a user.\"\"\"\n model = List(_User)\n selection = Instance(_User)\n table_editor = TableEditor(columns=[ObjectColumn(name='name'),\n ObjectColumn(name='description')], selected='selection', sort_model\n =True, configurable=False)\n traits_view = View(Item('model', show_label=False, editor=table_editor),\n title='Select a User', style='readonly', kind='modal', buttons=\n OKCancelButtons)\n\n\ndef select_user(users):\n \"\"\"Return a single user from the given list of users.\"\"\"\n model = [_User(name=name, description=description) for name,\n description in users]\n view = _UsersView(model=model)\n if view.configure_traits() and view.selection is not None:\n user = view.selection.name, view.selection.description\n else:\n user = '', ''\n return user\n",
"<import token>\n\n\nclass _User(HasTraits):\n \"\"\"This represents the user model.\"\"\"\n name = Unicode\n description = Unicode\n\n\nclass _UsersView(HasTraits):\n \"\"\"This represents the view used to select a user.\"\"\"\n model = List(_User)\n selection = Instance(_User)\n table_editor = TableEditor(columns=[ObjectColumn(name='name'),\n ObjectColumn(name='description')], selected='selection', sort_model\n =True, configurable=False)\n traits_view = View(Item('model', show_label=False, editor=table_editor),\n title='Select a User', style='readonly', kind='modal', buttons=\n OKCancelButtons)\n\n\ndef select_user(users):\n \"\"\"Return a single user from the given list of users.\"\"\"\n model = [_User(name=name, description=description) for name,\n description in users]\n view = _UsersView(model=model)\n if view.configure_traits() and view.selection is not None:\n user = view.selection.name, view.selection.description\n else:\n user = '', ''\n return user\n",
"<import token>\n\n\nclass _User(HasTraits):\n \"\"\"This represents the user model.\"\"\"\n name = Unicode\n description = Unicode\n\n\nclass _UsersView(HasTraits):\n \"\"\"This represents the view used to select a user.\"\"\"\n model = List(_User)\n selection = Instance(_User)\n table_editor = TableEditor(columns=[ObjectColumn(name='name'),\n ObjectColumn(name='description')], selected='selection', sort_model\n =True, configurable=False)\n traits_view = View(Item('model', show_label=False, editor=table_editor),\n title='Select a User', style='readonly', kind='modal', buttons=\n OKCancelButtons)\n\n\n<function token>\n",
"<import token>\n\n\nclass _User(HasTraits):\n <docstring token>\n name = Unicode\n description = Unicode\n\n\nclass _UsersView(HasTraits):\n \"\"\"This represents the view used to select a user.\"\"\"\n model = List(_User)\n selection = Instance(_User)\n table_editor = TableEditor(columns=[ObjectColumn(name='name'),\n ObjectColumn(name='description')], selected='selection', sort_model\n =True, configurable=False)\n traits_view = View(Item('model', show_label=False, editor=table_editor),\n title='Select a User', style='readonly', kind='modal', buttons=\n OKCancelButtons)\n\n\n<function token>\n",
"<import token>\n\n\nclass _User(HasTraits):\n <docstring token>\n <assignment token>\n <assignment token>\n\n\nclass _UsersView(HasTraits):\n \"\"\"This represents the view used to select a user.\"\"\"\n model = List(_User)\n selection = Instance(_User)\n table_editor = TableEditor(columns=[ObjectColumn(name='name'),\n ObjectColumn(name='description')], selected='selection', sort_model\n =True, configurable=False)\n traits_view = View(Item('model', show_label=False, editor=table_editor),\n title='Select a User', style='readonly', kind='modal', buttons=\n OKCancelButtons)\n\n\n<function token>\n",
"<import token>\n<class token>\n\n\nclass _UsersView(HasTraits):\n \"\"\"This represents the view used to select a user.\"\"\"\n model = List(_User)\n selection = Instance(_User)\n table_editor = TableEditor(columns=[ObjectColumn(name='name'),\n ObjectColumn(name='description')], selected='selection', sort_model\n =True, configurable=False)\n traits_view = View(Item('model', show_label=False, editor=table_editor),\n title='Select a User', style='readonly', kind='modal', buttons=\n OKCancelButtons)\n\n\n<function token>\n",
"<import token>\n<class token>\n\n\nclass _UsersView(HasTraits):\n <docstring token>\n model = List(_User)\n selection = Instance(_User)\n table_editor = TableEditor(columns=[ObjectColumn(name='name'),\n ObjectColumn(name='description')], selected='selection', sort_model\n =True, configurable=False)\n traits_view = View(Item('model', show_label=False, editor=table_editor),\n title='Select a User', style='readonly', kind='modal', buttons=\n OKCancelButtons)\n\n\n<function token>\n",
"<import token>\n<class token>\n\n\nclass _UsersView(HasTraits):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n<function token>\n",
"<import token>\n<class token>\n<class token>\n<function token>\n"
] | false |
98,417 |
2264fdbbf71982e3ac45d180725e27eaef82d116
|
import vocabulary
import dataset
import utils
import torch
import string
import os

SCRIPT_DIR = os.path.dirname(__file__)
DATA_FOLDER = 'data/'


def preprocess(s, lower=True):
s = s.translate(str.maketrans('', '', string.punctuation))
if lower:
s = s.lower()
return s
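
# e.g. preprocess("Hello, World!!") -> "hello world";
#      preprocess("Hi!", lower=False) -> "Hi"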


def load_sentimix(dataloader_params, language='spanish',
                  use_balanced_loader=True, binary=False, allowed_words=None):
df = utils.process_sentimix(language=language)
labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}
if binary:
labels_map = {'negative': 0, 'positive': 1}
if language == 'spanish':
lang_id_dict = {'lang1': 0, 'lang2': 1}
id2lang = ['Eng', 'Spa', 'Unk']
else:
lang_id_dict = {'Eng': 0, 'Hin': 1}
        id2lang = ['Eng', 'Hin', 'Unk']  # 'Unk' needed: lang_id_dict.get(t, 2) maps unknown tags to index 2
vocab = vocabulary.Vocabulary(allowed_words=allowed_words)
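    # Count word frequencies across the whole corpus before building the index.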
for text in df['tokens']:
for word in preprocess(str(text)).split():
vocab.count_token(word)
vocab.build()
vocab.num_labels = len(labels_map)
vocab._i2l = list(labels_map.keys())
vocab._l2i = labels_map
vocab._id2lang = id2lang
if binary:
df = df[df.sentiment != 'neutral']
tokenizer = lambda x: vocab.sentence2IDs(x)
labels_train = list(df.loc[df.splitset_label == 'train', 'sentiment'].map(labels_map).astype(int))
data_train = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label == 'train', 'tokens']]
training_set = dataset.Dataset(data_train, labels_train)
labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map(labels_map).astype(int))
data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label == 'test', 'tokens']]
language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in
df.loc[df.splitset_label == 'test', 'lang_id']]
test_set = dataset.Dataset(data_test, labels_test, language_test)
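
    # Note: a DataLoader sampler is mutually exclusive with shuffle, so the
    # balanced branch forces shuffle off before handing control to the sampler.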
if use_balanced_loader:
dataloader_params['shuffle'] = False
train_generator = torch.utils.data.DataLoader(training_set,
sampler=utils.ImbalancedDatasetSampler(training_set),
**dataloader_params,
collate_fn=dataset.pad_and_sort_batch)
else:
train_generator = torch.utils.data.DataLoader(training_set,
**dataloader_params,
collate_fn=dataset.pad_and_sort_batch)
test_generator = torch.utils.data.DataLoader(test_set,
**dataloader_params,
collate_fn=dataset.pad_and_sort_batch)
return train_generator, test_generator, vocab
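
# A minimal usage sketch (the parameter values below are assumptions, not from
# the original source):
#
#     params = {'batch_size': 32, 'shuffle': True}
#     train_gen, test_gen, vocab = load_sentimix(params, language='spanish')
#     for batch in train_gen:
#         ...  # padded, length-sorted batches from dataset.pad_and_sort_batch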


def load_sentimix_tokens(dataloader_params, language='spanish',
                         use_balanced_loader=False, binary=False,
                         allowed_words=None):
df = utils.process_sentimix(language=language)
labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}
if binary:
labels_map = {'negative': 0, 'positive': 1}
if language == 'spanish':
lang_id_dict = {'lang1': 0, 'lang2': 1}
id2lang = ['Eng', 'Spa', 'Unk']
else:
lang_id_dict = {'Eng': 0, 'Hin': 1}
id2lang = ['Eng', 'Hin', 'Unk']
vocab = vocabulary.Vocabulary(allowed_words=allowed_words)
for text in df['tokens']:
for word in preprocess(str(text)).split():
vocab.count_token(word)
vocab.build(vocab_size=10000)
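    # Character-level vocabulary: count every character of every word; the
    # build(vocab_size=60) call below caps it at the 60 most frequent characters.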
vocab_tokens = vocabulary.Vocabulary_tokens()
for text in df['tokens']:
for word in preprocess(str(text)).split():
for token in word:
vocab_tokens.count_token(token)
vocab_tokens.build(vocab_size=60)
vocab.num_labels = len(labels_map)
vocab._i2l = list(labels_map.keys())
vocab._l2i = labels_map
vocab._id2lang = id2lang
if binary:
df = df[df.sentiment != 'neutral']
tokenizer = lambda x: vocab.sentence2IDs(x)
tokenizer_words = lambda x: vocab_tokens.word2IDs(x)
labels_train = list(df.loc[df.splitset_label == 'train', 'sentiment'].map(labels_map).astype(int))
data_train = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label == 'train', 'tokens']]
data_train_tokens = [[tokenizer_words(w) for w in vocab.split_sentence(preprocess(s))] for s in
df.loc[df.splitset_label == 'train', 'tokens']]
training_set = dataset.Dataset_tokens(data_train, data_train_tokens, labels_train)
labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map(labels_map).astype(int))
data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label == 'test', 'tokens']]
data_test_tokens = [[tokenizer_words(w) for w in vocab.split_sentence(preprocess(s))] for s in
df.loc[df.splitset_label == 'test', 'tokens']]
language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in
df.loc[df.splitset_label == 'test', 'lang_id']]
test_set = dataset.Dataset_tokens(data_test, data_test_tokens, labels_test, language_test)
if use_balanced_loader:
dataloader_params['shuffle'] = False
train_generator = torch.utils.data.DataLoader(training_set,
sampler=utils.ImbalancedDatasetSampler(training_set),
**dataloader_params,
collate_fn=dataset.pad_and_sort_batch_tokens)
else:
train_generator = torch.utils.data.DataLoader(training_set,
**dataloader_params,
collate_fn=dataset.pad_and_sort_batch_tokens)
test_generator = torch.utils.data.DataLoader(test_set,
**dataloader_params,
collate_fn=dataset.pad_and_sort_batch_tokens)
return train_generator, test_generator, vocab, vocab_tokens
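
# A minimal usage sketch (parameter values are assumptions); this variant also
# returns the character-level vocabulary for subword/character models:
#
#     params = {'batch_size': 32, 'shuffle': True}
#     train_gen, test_gen, vocab, vocab_tokens = load_sentimix_tokens(
#         params, language='spanish', binary=True)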
|
[
"import vocabulary\nimport dataset\nimport utils\nimport torch\nimport string\nimport os\n\nSCRIPT_DIR = os.path.dirname(__file__)\nDATA_FOLDER = 'data/'\n\ndef preprocess(s, lower=True):\n s = s.translate(str.maketrans('', '', string.punctuation))\n\n if lower:\n s = s.lower()\n\n return s\n\ndef load_sentimix(dataloader_params, language='spanish', use_balanced_loader=True, binary=False, allowed_words=None):\n df = utils.process_sentimix(language=language)\n\n labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}\n\n if binary:\n labels_map = {'negative': 0, 'positive': 1}\n\n if language == 'spanish':\n lang_id_dict = {'lang1': 0, 'lang2': 1}\n id2lang = ['Eng', 'Spa', 'Unk']\n else:\n lang_id_dict = {'Eng': 0, 'Hin': 1}\n id2lang = ['Eng', 'Hin']\n\n vocab = vocabulary.Vocabulary(allowed_words=allowed_words)\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n vocab.count_token(word)\n\n vocab.build()\n\n vocab.num_labels = len(labels_map)\n vocab._i2l = list(labels_map.keys())\n vocab._l2i = labels_map\n vocab._id2lang = id2lang\n\n if binary:\n df = df[df.sentiment != 'neutral']\n\n tokenizer = lambda x: vocab.sentence2IDs(x)\n\n labels_train = list(df.loc[df.splitset_label == 'train', 'sentiment'].map(labels_map).astype(int))\n data_train = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label == 'train', 'tokens']]\n training_set = dataset.Dataset(data_train, labels_train)\n\n labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map(labels_map).astype(int))\n data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label == 'test', 'tokens']]\n language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in\n df.loc[df.splitset_label == 'test', 'lang_id']]\n\n test_set = dataset.Dataset(data_test, labels_test, language_test)\n\n if use_balanced_loader:\n dataloader_params['shuffle'] = False\n train_generator = torch.utils.data.DataLoader(training_set,\n sampler=utils.ImbalancedDatasetSampler(training_set),\n **dataloader_params,\n collate_fn=dataset.pad_and_sort_batch)\n else:\n train_generator = torch.utils.data.DataLoader(training_set,\n **dataloader_params,\n collate_fn=dataset.pad_and_sort_batch)\n\n test_generator = torch.utils.data.DataLoader(test_set,\n **dataloader_params,\n collate_fn=dataset.pad_and_sort_batch)\n\n return train_generator, test_generator, vocab\n\n\ndef load_sentimix_tokens(dataloader_params, language='spanish', use_balanced_loader=False, binary=False,\n allowed_words=None):\n\n df = utils.process_sentimix(language=language)\n\n labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}\n\n if binary:\n labels_map = {'negative': 0, 'positive': 1}\n\n if language == 'spanish':\n lang_id_dict = {'lang1': 0, 'lang2': 1}\n id2lang = ['Eng', 'Spa', 'Unk']\n else:\n lang_id_dict = {'Eng': 0, 'Hin': 1}\n id2lang = ['Eng', 'Hin', 'Unk']\n\n vocab = vocabulary.Vocabulary(allowed_words=allowed_words)\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n vocab.count_token(word)\n\n vocab.build(vocab_size=10000)\n\n vocab_tokens = vocabulary.Vocabulary_tokens()\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n for token in word:\n vocab_tokens.count_token(token)\n\n vocab_tokens.build(vocab_size=60)\n\n vocab.num_labels = len(labels_map)\n vocab._i2l = list(labels_map.keys())\n vocab._l2i = labels_map\n vocab._id2lang = id2lang\n\n if binary:\n df = df[df.sentiment != 'neutral']\n\n tokenizer = lambda x: vocab.sentence2IDs(x)\n tokenizer_words = lambda x: 
vocab_tokens.word2IDs(x)\n\n labels_train = list(df.loc[df.splitset_label == 'train', 'sentiment'].map(labels_map).astype(int))\n data_train = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label == 'train', 'tokens']]\n data_train_tokens = [[tokenizer_words(w) for w in vocab.split_sentence(preprocess(s))] for s in\n df.loc[df.splitset_label == 'train', 'tokens']]\n\n training_set = dataset.Dataset_tokens(data_train, data_train_tokens, labels_train)\n\n labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map(labels_map).astype(int))\n data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label == 'test', 'tokens']]\n data_test_tokens = [[tokenizer_words(w) for w in vocab.split_sentence(preprocess(s))] for s in\n df.loc[df.splitset_label == 'test', 'tokens']]\n language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in\n df.loc[df.splitset_label == 'test', 'lang_id']]\n\n test_set = dataset.Dataset_tokens(data_test, data_test_tokens, labels_test, language_test)\n\n if use_balanced_loader:\n dataloader_params['shuffle'] = False\n train_generator = torch.utils.data.DataLoader(training_set,\n sampler=utils.ImbalancedDatasetSampler(training_set),\n **dataloader_params,\n collate_fn=dataset.pad_and_sort_batch_tokens)\n else:\n train_generator = torch.utils.data.DataLoader(training_set,\n **dataloader_params,\n collate_fn=dataset.pad_and_sort_batch_tokens)\n\n test_generator = torch.utils.data.DataLoader(test_set,\n **dataloader_params,\n collate_fn=dataset.pad_and_sort_batch_tokens)\n\n return train_generator, test_generator, vocab, vocab_tokens",
"import vocabulary\nimport dataset\nimport utils\nimport torch\nimport string\nimport os\nSCRIPT_DIR = os.path.dirname(__file__)\nDATA_FOLDER = 'data/'\n\n\ndef preprocess(s, lower=True):\n s = s.translate(str.maketrans('', '', string.punctuation))\n if lower:\n s = s.lower()\n return s\n\n\ndef load_sentimix(dataloader_params, language='spanish',\n use_balanced_loader=True, binary=False, allowed_words=None):\n df = utils.process_sentimix(language=language)\n labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}\n if binary:\n labels_map = {'negative': 0, 'positive': 1}\n if language == 'spanish':\n lang_id_dict = {'lang1': 0, 'lang2': 1}\n id2lang = ['Eng', 'Spa', 'Unk']\n else:\n lang_id_dict = {'Eng': 0, 'Hin': 1}\n id2lang = ['Eng', 'Hin']\n vocab = vocabulary.Vocabulary(allowed_words=allowed_words)\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n vocab.count_token(word)\n vocab.build()\n vocab.num_labels = len(labels_map)\n vocab._i2l = list(labels_map.keys())\n vocab._l2i = labels_map\n vocab._id2lang = id2lang\n if binary:\n df = df[df.sentiment != 'neutral']\n tokenizer = lambda x: vocab.sentence2IDs(x)\n labels_train = list(df.loc[df.splitset_label == 'train', 'sentiment'].\n map(labels_map).astype(int))\n data_train = [tokenizer(preprocess(s)) for s in df.loc[df.\n splitset_label == 'train', 'tokens']]\n training_set = dataset.Dataset(data_train, labels_train)\n labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map\n (labels_map).astype(int))\n data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label ==\n 'test', 'tokens']]\n language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in\n df.loc[df.splitset_label == 'test', 'lang_id']]\n test_set = dataset.Dataset(data_test, labels_test, language_test)\n if use_balanced_loader:\n dataloader_params['shuffle'] = False\n train_generator = torch.utils.data.DataLoader(training_set, sampler\n =utils.ImbalancedDatasetSampler(training_set), **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n else:\n train_generator = torch.utils.data.DataLoader(training_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n test_generator = torch.utils.data.DataLoader(test_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n return train_generator, test_generator, vocab\n\n\ndef load_sentimix_tokens(dataloader_params, language='spanish',\n use_balanced_loader=False, binary=False, allowed_words=None):\n df = utils.process_sentimix(language=language)\n labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}\n if binary:\n labels_map = {'negative': 0, 'positive': 1}\n if language == 'spanish':\n lang_id_dict = {'lang1': 0, 'lang2': 1}\n id2lang = ['Eng', 'Spa', 'Unk']\n else:\n lang_id_dict = {'Eng': 0, 'Hin': 1}\n id2lang = ['Eng', 'Hin', 'Unk']\n vocab = vocabulary.Vocabulary(allowed_words=allowed_words)\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n vocab.count_token(word)\n vocab.build(vocab_size=10000)\n vocab_tokens = vocabulary.Vocabulary_tokens()\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n for token in word:\n vocab_tokens.count_token(token)\n vocab_tokens.build(vocab_size=60)\n vocab.num_labels = len(labels_map)\n vocab._i2l = list(labels_map.keys())\n vocab._l2i = labels_map\n vocab._id2lang = id2lang\n if binary:\n df = df[df.sentiment != 'neutral']\n tokenizer = lambda x: vocab.sentence2IDs(x)\n tokenizer_words = lambda x: vocab_tokens.word2IDs(x)\n labels_train = 
list(df.loc[df.splitset_label == 'train', 'sentiment'].\n map(labels_map).astype(int))\n data_train = [tokenizer(preprocess(s)) for s in df.loc[df.\n splitset_label == 'train', 'tokens']]\n data_train_tokens = [[tokenizer_words(w) for w in vocab.split_sentence(\n preprocess(s))] for s in df.loc[df.splitset_label == 'train', 'tokens']\n ]\n training_set = dataset.Dataset_tokens(data_train, data_train_tokens,\n labels_train)\n labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map\n (labels_map).astype(int))\n data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label ==\n 'test', 'tokens']]\n data_test_tokens = [[tokenizer_words(w) for w in vocab.split_sentence(\n preprocess(s))] for s in df.loc[df.splitset_label == 'test', 'tokens']]\n language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in\n df.loc[df.splitset_label == 'test', 'lang_id']]\n test_set = dataset.Dataset_tokens(data_test, data_test_tokens,\n labels_test, language_test)\n if use_balanced_loader:\n dataloader_params['shuffle'] = False\n train_generator = torch.utils.data.DataLoader(training_set, sampler\n =utils.ImbalancedDatasetSampler(training_set), **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch_tokens)\n else:\n train_generator = torch.utils.data.DataLoader(training_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch_tokens)\n test_generator = torch.utils.data.DataLoader(test_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch_tokens)\n return train_generator, test_generator, vocab, vocab_tokens\n",
"<import token>\nSCRIPT_DIR = os.path.dirname(__file__)\nDATA_FOLDER = 'data/'\n\n\ndef preprocess(s, lower=True):\n s = s.translate(str.maketrans('', '', string.punctuation))\n if lower:\n s = s.lower()\n return s\n\n\ndef load_sentimix(dataloader_params, language='spanish',\n use_balanced_loader=True, binary=False, allowed_words=None):\n df = utils.process_sentimix(language=language)\n labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}\n if binary:\n labels_map = {'negative': 0, 'positive': 1}\n if language == 'spanish':\n lang_id_dict = {'lang1': 0, 'lang2': 1}\n id2lang = ['Eng', 'Spa', 'Unk']\n else:\n lang_id_dict = {'Eng': 0, 'Hin': 1}\n id2lang = ['Eng', 'Hin']\n vocab = vocabulary.Vocabulary(allowed_words=allowed_words)\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n vocab.count_token(word)\n vocab.build()\n vocab.num_labels = len(labels_map)\n vocab._i2l = list(labels_map.keys())\n vocab._l2i = labels_map\n vocab._id2lang = id2lang\n if binary:\n df = df[df.sentiment != 'neutral']\n tokenizer = lambda x: vocab.sentence2IDs(x)\n labels_train = list(df.loc[df.splitset_label == 'train', 'sentiment'].\n map(labels_map).astype(int))\n data_train = [tokenizer(preprocess(s)) for s in df.loc[df.\n splitset_label == 'train', 'tokens']]\n training_set = dataset.Dataset(data_train, labels_train)\n labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map\n (labels_map).astype(int))\n data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label ==\n 'test', 'tokens']]\n language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in\n df.loc[df.splitset_label == 'test', 'lang_id']]\n test_set = dataset.Dataset(data_test, labels_test, language_test)\n if use_balanced_loader:\n dataloader_params['shuffle'] = False\n train_generator = torch.utils.data.DataLoader(training_set, sampler\n =utils.ImbalancedDatasetSampler(training_set), **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n else:\n train_generator = torch.utils.data.DataLoader(training_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n test_generator = torch.utils.data.DataLoader(test_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n return train_generator, test_generator, vocab\n\n\ndef load_sentimix_tokens(dataloader_params, language='spanish',\n use_balanced_loader=False, binary=False, allowed_words=None):\n df = utils.process_sentimix(language=language)\n labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}\n if binary:\n labels_map = {'negative': 0, 'positive': 1}\n if language == 'spanish':\n lang_id_dict = {'lang1': 0, 'lang2': 1}\n id2lang = ['Eng', 'Spa', 'Unk']\n else:\n lang_id_dict = {'Eng': 0, 'Hin': 1}\n id2lang = ['Eng', 'Hin', 'Unk']\n vocab = vocabulary.Vocabulary(allowed_words=allowed_words)\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n vocab.count_token(word)\n vocab.build(vocab_size=10000)\n vocab_tokens = vocabulary.Vocabulary_tokens()\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n for token in word:\n vocab_tokens.count_token(token)\n vocab_tokens.build(vocab_size=60)\n vocab.num_labels = len(labels_map)\n vocab._i2l = list(labels_map.keys())\n vocab._l2i = labels_map\n vocab._id2lang = id2lang\n if binary:\n df = df[df.sentiment != 'neutral']\n tokenizer = lambda x: vocab.sentence2IDs(x)\n tokenizer_words = lambda x: vocab_tokens.word2IDs(x)\n labels_train = list(df.loc[df.splitset_label == 'train', 'sentiment'].\n 
map(labels_map).astype(int))\n data_train = [tokenizer(preprocess(s)) for s in df.loc[df.\n splitset_label == 'train', 'tokens']]\n data_train_tokens = [[tokenizer_words(w) for w in vocab.split_sentence(\n preprocess(s))] for s in df.loc[df.splitset_label == 'train', 'tokens']\n ]\n training_set = dataset.Dataset_tokens(data_train, data_train_tokens,\n labels_train)\n labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map\n (labels_map).astype(int))\n data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label ==\n 'test', 'tokens']]\n data_test_tokens = [[tokenizer_words(w) for w in vocab.split_sentence(\n preprocess(s))] for s in df.loc[df.splitset_label == 'test', 'tokens']]\n language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in\n df.loc[df.splitset_label == 'test', 'lang_id']]\n test_set = dataset.Dataset_tokens(data_test, data_test_tokens,\n labels_test, language_test)\n if use_balanced_loader:\n dataloader_params['shuffle'] = False\n train_generator = torch.utils.data.DataLoader(training_set, sampler\n =utils.ImbalancedDatasetSampler(training_set), **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch_tokens)\n else:\n train_generator = torch.utils.data.DataLoader(training_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch_tokens)\n test_generator = torch.utils.data.DataLoader(test_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch_tokens)\n return train_generator, test_generator, vocab, vocab_tokens\n",
"<import token>\n<assignment token>\n\n\ndef preprocess(s, lower=True):\n s = s.translate(str.maketrans('', '', string.punctuation))\n if lower:\n s = s.lower()\n return s\n\n\ndef load_sentimix(dataloader_params, language='spanish',\n use_balanced_loader=True, binary=False, allowed_words=None):\n df = utils.process_sentimix(language=language)\n labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}\n if binary:\n labels_map = {'negative': 0, 'positive': 1}\n if language == 'spanish':\n lang_id_dict = {'lang1': 0, 'lang2': 1}\n id2lang = ['Eng', 'Spa', 'Unk']\n else:\n lang_id_dict = {'Eng': 0, 'Hin': 1}\n id2lang = ['Eng', 'Hin']\n vocab = vocabulary.Vocabulary(allowed_words=allowed_words)\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n vocab.count_token(word)\n vocab.build()\n vocab.num_labels = len(labels_map)\n vocab._i2l = list(labels_map.keys())\n vocab._l2i = labels_map\n vocab._id2lang = id2lang\n if binary:\n df = df[df.sentiment != 'neutral']\n tokenizer = lambda x: vocab.sentence2IDs(x)\n labels_train = list(df.loc[df.splitset_label == 'train', 'sentiment'].\n map(labels_map).astype(int))\n data_train = [tokenizer(preprocess(s)) for s in df.loc[df.\n splitset_label == 'train', 'tokens']]\n training_set = dataset.Dataset(data_train, labels_train)\n labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map\n (labels_map).astype(int))\n data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label ==\n 'test', 'tokens']]\n language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in\n df.loc[df.splitset_label == 'test', 'lang_id']]\n test_set = dataset.Dataset(data_test, labels_test, language_test)\n if use_balanced_loader:\n dataloader_params['shuffle'] = False\n train_generator = torch.utils.data.DataLoader(training_set, sampler\n =utils.ImbalancedDatasetSampler(training_set), **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n else:\n train_generator = torch.utils.data.DataLoader(training_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n test_generator = torch.utils.data.DataLoader(test_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n return train_generator, test_generator, vocab\n\n\ndef load_sentimix_tokens(dataloader_params, language='spanish',\n use_balanced_loader=False, binary=False, allowed_words=None):\n df = utils.process_sentimix(language=language)\n labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}\n if binary:\n labels_map = {'negative': 0, 'positive': 1}\n if language == 'spanish':\n lang_id_dict = {'lang1': 0, 'lang2': 1}\n id2lang = ['Eng', 'Spa', 'Unk']\n else:\n lang_id_dict = {'Eng': 0, 'Hin': 1}\n id2lang = ['Eng', 'Hin', 'Unk']\n vocab = vocabulary.Vocabulary(allowed_words=allowed_words)\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n vocab.count_token(word)\n vocab.build(vocab_size=10000)\n vocab_tokens = vocabulary.Vocabulary_tokens()\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n for token in word:\n vocab_tokens.count_token(token)\n vocab_tokens.build(vocab_size=60)\n vocab.num_labels = len(labels_map)\n vocab._i2l = list(labels_map.keys())\n vocab._l2i = labels_map\n vocab._id2lang = id2lang\n if binary:\n df = df[df.sentiment != 'neutral']\n tokenizer = lambda x: vocab.sentence2IDs(x)\n tokenizer_words = lambda x: vocab_tokens.word2IDs(x)\n labels_train = list(df.loc[df.splitset_label == 'train', 'sentiment'].\n map(labels_map).astype(int))\n data_train = 
[tokenizer(preprocess(s)) for s in df.loc[df.\n splitset_label == 'train', 'tokens']]\n data_train_tokens = [[tokenizer_words(w) for w in vocab.split_sentence(\n preprocess(s))] for s in df.loc[df.splitset_label == 'train', 'tokens']\n ]\n training_set = dataset.Dataset_tokens(data_train, data_train_tokens,\n labels_train)\n labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map\n (labels_map).astype(int))\n data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label ==\n 'test', 'tokens']]\n data_test_tokens = [[tokenizer_words(w) for w in vocab.split_sentence(\n preprocess(s))] for s in df.loc[df.splitset_label == 'test', 'tokens']]\n language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in\n df.loc[df.splitset_label == 'test', 'lang_id']]\n test_set = dataset.Dataset_tokens(data_test, data_test_tokens,\n labels_test, language_test)\n if use_balanced_loader:\n dataloader_params['shuffle'] = False\n train_generator = torch.utils.data.DataLoader(training_set, sampler\n =utils.ImbalancedDatasetSampler(training_set), **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch_tokens)\n else:\n train_generator = torch.utils.data.DataLoader(training_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch_tokens)\n test_generator = torch.utils.data.DataLoader(test_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch_tokens)\n return train_generator, test_generator, vocab, vocab_tokens\n",
"<import token>\n<assignment token>\n\n\ndef preprocess(s, lower=True):\n s = s.translate(str.maketrans('', '', string.punctuation))\n if lower:\n s = s.lower()\n return s\n\n\ndef load_sentimix(dataloader_params, language='spanish',\n use_balanced_loader=True, binary=False, allowed_words=None):\n df = utils.process_sentimix(language=language)\n labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}\n if binary:\n labels_map = {'negative': 0, 'positive': 1}\n if language == 'spanish':\n lang_id_dict = {'lang1': 0, 'lang2': 1}\n id2lang = ['Eng', 'Spa', 'Unk']\n else:\n lang_id_dict = {'Eng': 0, 'Hin': 1}\n id2lang = ['Eng', 'Hin']\n vocab = vocabulary.Vocabulary(allowed_words=allowed_words)\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n vocab.count_token(word)\n vocab.build()\n vocab.num_labels = len(labels_map)\n vocab._i2l = list(labels_map.keys())\n vocab._l2i = labels_map\n vocab._id2lang = id2lang\n if binary:\n df = df[df.sentiment != 'neutral']\n tokenizer = lambda x: vocab.sentence2IDs(x)\n labels_train = list(df.loc[df.splitset_label == 'train', 'sentiment'].\n map(labels_map).astype(int))\n data_train = [tokenizer(preprocess(s)) for s in df.loc[df.\n splitset_label == 'train', 'tokens']]\n training_set = dataset.Dataset(data_train, labels_train)\n labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map\n (labels_map).astype(int))\n data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label ==\n 'test', 'tokens']]\n language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in\n df.loc[df.splitset_label == 'test', 'lang_id']]\n test_set = dataset.Dataset(data_test, labels_test, language_test)\n if use_balanced_loader:\n dataloader_params['shuffle'] = False\n train_generator = torch.utils.data.DataLoader(training_set, sampler\n =utils.ImbalancedDatasetSampler(training_set), **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n else:\n train_generator = torch.utils.data.DataLoader(training_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n test_generator = torch.utils.data.DataLoader(test_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n return train_generator, test_generator, vocab\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef load_sentimix(dataloader_params, language='spanish',\n use_balanced_loader=True, binary=False, allowed_words=None):\n df = utils.process_sentimix(language=language)\n labels_map = {'negative': 0, 'neutral': 1, 'positive': 2}\n if binary:\n labels_map = {'negative': 0, 'positive': 1}\n if language == 'spanish':\n lang_id_dict = {'lang1': 0, 'lang2': 1}\n id2lang = ['Eng', 'Spa', 'Unk']\n else:\n lang_id_dict = {'Eng': 0, 'Hin': 1}\n id2lang = ['Eng', 'Hin']\n vocab = vocabulary.Vocabulary(allowed_words=allowed_words)\n for text in df['tokens']:\n for word in preprocess(str(text)).split():\n vocab.count_token(word)\n vocab.build()\n vocab.num_labels = len(labels_map)\n vocab._i2l = list(labels_map.keys())\n vocab._l2i = labels_map\n vocab._id2lang = id2lang\n if binary:\n df = df[df.sentiment != 'neutral']\n tokenizer = lambda x: vocab.sentence2IDs(x)\n labels_train = list(df.loc[df.splitset_label == 'train', 'sentiment'].\n map(labels_map).astype(int))\n data_train = [tokenizer(preprocess(s)) for s in df.loc[df.\n splitset_label == 'train', 'tokens']]\n training_set = dataset.Dataset(data_train, labels_train)\n labels_test = list(df.loc[df.splitset_label == 'test', 'sentiment'].map\n (labels_map).astype(int))\n data_test = [tokenizer(preprocess(s)) for s in df.loc[df.splitset_label ==\n 'test', 'tokens']]\n language_test = [[lang_id_dict.get(t, 2) for t in s.split()] for s in\n df.loc[df.splitset_label == 'test', 'lang_id']]\n test_set = dataset.Dataset(data_test, labels_test, language_test)\n if use_balanced_loader:\n dataloader_params['shuffle'] = False\n train_generator = torch.utils.data.DataLoader(training_set, sampler\n =utils.ImbalancedDatasetSampler(training_set), **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n else:\n train_generator = torch.utils.data.DataLoader(training_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n test_generator = torch.utils.data.DataLoader(test_set, **\n dataloader_params, collate_fn=dataset.pad_and_sort_batch)\n return train_generator, test_generator, vocab\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,418 |
462983afe1987a12df8954fc8f0161dd6cfa2e29
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-08 19:00
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('shop_bot_app', '0024_auto_20170308_1925'),
    ]

    operations = [
        migrations.AddField(
            model_name='postponedpost',
            name='bot',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='shop_bot_app.Bot'),
        ),
        migrations.AlterField(
            model_name='postponedpost',
            name='product',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='shop_bot_app.Product', verbose_name='\u0422\u043e\u0432\u0430\u0440'),
        ),
    ]
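
# Usage note (added; assumes the standard Django project layout): once this
# file lives in shop_bot_app/migrations/, it is applied with the stock
# management command, and Django resolves the dependency on
# 0024_auto_20170308_1925 first:
#
#   python manage.py migrate shop_bot_app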
|
[
"# -*- coding: utf-8 -*-\n# Generated by Django 1.10.3 on 2017-03-08 19:00\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop_bot_app', '0024_auto_20170308_1925'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='postponedpost',\n name='bot',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='shop_bot_app.Bot'),\n ),\n migrations.AlterField(\n model_name='postponedpost',\n name='product',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='shop_bot_app.Product', verbose_name='\\u0422\\u043e\\u0432\\u0430\\u0440'),\n ),\n ]\n",
"from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop_bot_app', '0024_auto_20170308_1925')]\n operations = [migrations.AddField(model_name='postponedpost', name=\n 'bot', field=models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.CASCADE, to='shop_bot_app.Bot')), migrations.\n AlterField(model_name='postponedpost', name='product', field=models\n .ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='shop_bot_app.Product', verbose_name='Товар'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop_bot_app', '0024_auto_20170308_1925')]\n operations = [migrations.AddField(model_name='postponedpost', name=\n 'bot', field=models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.CASCADE, to='shop_bot_app.Bot')), migrations.\n AlterField(model_name='postponedpost', name='product', field=models\n .ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='shop_bot_app.Product', verbose_name='Товар'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,419 |
1b85740fa0f4ede5981aa9ad3f5cd40aac6881d2
|
a = float(input())
print(a % 1)
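
# Added note (a sketch, not in the original snippet): for a non-negative
# float, a % 1 is its fractional part, e.g. 3.75 % 1 == 0.75. If both
# parts were needed, the standard library offers math.modf:
#
#   import math
#   frac, whole = math.modf(a)   # 3.75 -> (0.75, 3.0)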
|
[
"a=float(input())\nprint(a%1)",
"a = float(input())\nprint(a % 1)\n",
"<assignment token>\nprint(a % 1)\n",
"<assignment token>\n<code token>\n"
] | false |
98,420 |
8c820af4c2adf0312bce878a9c5a90997455e6e8
|
data = {
"nNurses": 81,
"nHours": 38,
"minHours": 10,
"maxHours": 12,
"maxConsec": 12,
"maxPresence": 30,
"demand": [25, 22, 18, 17, 20, 23, 23, 21, 21, 18, 19, 23, 24, 23, 18, 18, 22, 26, 21, 21, 25, 22, 24, 23, 18, 22, 23, 21, 19, 17, 13, 12, 11, 11, 14, 9, 14, 12]
}
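
# A minimal consistency check (added sketch): the demand profile should
# list exactly one value per hour of the planning horizon; the 38 entries
# above match data["nHours"].
assert len(data["demand"]) == data["nHours"]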
|
[
"data = { \n \"nNurses\": 81, \n \"nHours\": 38, \n \"minHours\": 10, \n \"maxHours\": 12, \n \"maxConsec\": 12, \n \"maxPresence\": 30, \n \"demand\": [25, 22, 18, 17, 20, 23, 23, 21, 21, 18, 19, 23, 24, 23, 18, 18, 22, 26, 21, 21, 25, 22, 24, 23, 18, 22, 23, 21, 19, 17, 13, 12, 11, 11, 14, 9, 14, 12] \n}",
"data = {'nNurses': 81, 'nHours': 38, 'minHours': 10, 'maxHours': 12,\n 'maxConsec': 12, 'maxPresence': 30, 'demand': [25, 22, 18, 17, 20, 23, \n 23, 21, 21, 18, 19, 23, 24, 23, 18, 18, 22, 26, 21, 21, 25, 22, 24, 23,\n 18, 22, 23, 21, 19, 17, 13, 12, 11, 11, 14, 9, 14, 12]}\n",
"<assignment token>\n"
] | false |
98,421 |
bfff920db27ed4875e4d885f98a36d3ad06aea2a
|
import socket
import pickle

HEADER = 10  # fixed-width length prefix, in bytes

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((socket.gethostname(), 9090))

while True:
    cela_zprava = b""
    # Initialise the new-message flag once per message. The original
    # `while (nova_zprava := True):` re-set the flag to True on every pass,
    # so the header branch ran for every chunk received.
    nova_zprava = True
    while True:
        zprava = s.recv(16)
        if nova_zprava:
            print(f"NOVA ZPRAVA DELKY: {zprava[:HEADER]}")
            # parse the fixed-width header into an int; comparing the raw
            # bytes against len() below would never match
            delka_zpravy = int(zprava[:HEADER])
            nova_zprava = False

        print(f"DELKA ZPRAVY: {delka_zpravy}")
        cela_zprava += zprava

        if len(cela_zprava) - HEADER == delka_zpravy:
            print("OBDRZENA CELA ZPRAVA")
            print(cela_zprava[HEADER:])
            print(pickle.loads(cela_zprava[HEADER:]))
            nova_zprava = True
            cela_zprava = b""
|
[
"import socket\nimport pickle\n\n\nHEADER = 10\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((socket.gethostname(), 9090))\n\nwhile True:\n cela_zprava = b\"\"\n\n while (nova_zprava := True):\n zprava = s.recv(16)\n if nova_zprava:\n print(f\"NOVA ZPRAVA DELKY: {zprava[:HEADER]}\")\n delka_zpravy = zprava[:HEADER]\n nova_zprava = False\n\n print(f\"DELKA ZPRAVY: {delka_zpravy}\")\n cela_zprava += zprava\n\n if len(cela_zprava) - HEADER == delka_zpravy:\n print(\"OBDRZENA CELA ZPRAVA\")\n print(cela_zprava[HEADER:])\n print(pickle.loads(cela_zprava[HEADER:]))\n nova_zprava = True\n cela_zprava = b\"\"\n\n\n",
"import socket\nimport pickle\nHEADER = 10\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((socket.gethostname(), 9090))\nwhile True:\n cela_zprava = b''\n while (nova_zprava := True):\n zprava = s.recv(16)\n if nova_zprava:\n print(f'NOVA ZPRAVA DELKY: {zprava[:HEADER]}')\n delka_zpravy = zprava[:HEADER]\n nova_zprava = False\n print(f'DELKA ZPRAVY: {delka_zpravy}')\n cela_zprava += zprava\n if len(cela_zprava) - HEADER == delka_zpravy:\n print('OBDRZENA CELA ZPRAVA')\n print(cela_zprava[HEADER:])\n print(pickle.loads(cela_zprava[HEADER:]))\n nova_zprava = True\n cela_zprava = b''\n",
"<import token>\nHEADER = 10\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((socket.gethostname(), 9090))\nwhile True:\n cela_zprava = b''\n while (nova_zprava := True):\n zprava = s.recv(16)\n if nova_zprava:\n print(f'NOVA ZPRAVA DELKY: {zprava[:HEADER]}')\n delka_zpravy = zprava[:HEADER]\n nova_zprava = False\n print(f'DELKA ZPRAVY: {delka_zpravy}')\n cela_zprava += zprava\n if len(cela_zprava) - HEADER == delka_zpravy:\n print('OBDRZENA CELA ZPRAVA')\n print(cela_zprava[HEADER:])\n print(pickle.loads(cela_zprava[HEADER:]))\n nova_zprava = True\n cela_zprava = b''\n",
"<import token>\n<assignment token>\ns.connect((socket.gethostname(), 9090))\nwhile True:\n cela_zprava = b''\n while (nova_zprava := True):\n zprava = s.recv(16)\n if nova_zprava:\n print(f'NOVA ZPRAVA DELKY: {zprava[:HEADER]}')\n delka_zpravy = zprava[:HEADER]\n nova_zprava = False\n print(f'DELKA ZPRAVY: {delka_zpravy}')\n cela_zprava += zprava\n if len(cela_zprava) - HEADER == delka_zpravy:\n print('OBDRZENA CELA ZPRAVA')\n print(cela_zprava[HEADER:])\n print(pickle.loads(cela_zprava[HEADER:]))\n nova_zprava = True\n cela_zprava = b''\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,422 |
ca75496870d2c4d27cd0049b4480817318bbf250
|
# app/resources/message.py

# 3rd party imports
from flask_restful import Resource
from flask import request, jsonify

# local imports
from app.models import Message
from app.schemas import message_schema, messages_schema


class Text(Resource):
    def post(self):
        '''
        create a new message
        '''
        email = request.get_json().get('email')
        message = request.get_json().get('message')

        if email is not None and message is not None:
            try:
                new_message = Message(email, message)
                new_message.save()
                result = message_schema.dump(new_message).data
                response = {
                    'status': 'Successful',
                    'data': result,
                    'message': 'Message successfully sent'
                }
                return response, 201

            except Exception as e:
                response = {
                    'status': 'Failed',
                    'data': str(e),
                    'message': 'Message not sent'
                }
                return response, 400

        response = {
            'status': 'Failed',
            'message': 'Cannot send empty message'
        }
        return response, 400
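
# A minimal wiring sketch (added; the module paths are assumptions) showing
# how this resource would typically be registered with Flask-RESTful:
#
#   from flask import Flask
#   from flask_restful import Api
#   from app.resources.message import Text
#
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(Text, '/messages')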
|
[
"# app/resources/message.py\n\n# 3rd party imports\nfrom flask_restful import Resource\nfrom flask import request, jsonify\n\n# local imports\nfrom app.models import Message\nfrom app.schemas import message_schema, messages_schema\n\nclass Text(Resource):\n def post(self):\n '''\n create a new message\n '''\n\n email = request.get_json().get('email')\n message = request.get_json().get('message')\n\n if (email is not None and message is not None):\n try:\n new_message = Message(email, message)\n new_message.save()\n result = message_schema.dump(new_message).data\n response = {\n 'status': 'Successful',\n 'data': result,\n 'message': 'Message successfully sent'\n }\n return response, 201\n\n except Exception as e:\n response = {\n 'status': 'Failed',\n 'data': str(e),\n 'message': 'Message not sent'\n }\n\n return response, 400\n\n response = {\n 'status': 'Failed',\n 'message': 'Cannot send empty message'\n }\n\n return response, 400\n\n",
"from flask_restful import Resource\nfrom flask import request, jsonify\nfrom app.models import Message\nfrom app.schemas import message_schema, messages_schema\n\n\nclass Text(Resource):\n\n def post(self):\n \"\"\"\n create a new message\n \"\"\"\n email = request.get_json().get('email')\n message = request.get_json().get('message')\n if email is not None and message is not None:\n try:\n new_message = Message(email, message)\n new_message.save()\n result = message_schema.dump(new_message).data\n response = {'status': 'Successful', 'data': result,\n 'message': 'Message successfully sent'}\n return response, 201\n except Exception as e:\n response = {'status': 'Failed', 'data': str(e), 'message':\n 'Message not sent'}\n return response, 400\n response = {'status': 'Failed', 'message':\n 'Cannot send empty message'}\n return response, 400\n",
"<import token>\n\n\nclass Text(Resource):\n\n def post(self):\n \"\"\"\n create a new message\n \"\"\"\n email = request.get_json().get('email')\n message = request.get_json().get('message')\n if email is not None and message is not None:\n try:\n new_message = Message(email, message)\n new_message.save()\n result = message_schema.dump(new_message).data\n response = {'status': 'Successful', 'data': result,\n 'message': 'Message successfully sent'}\n return response, 201\n except Exception as e:\n response = {'status': 'Failed', 'data': str(e), 'message':\n 'Message not sent'}\n return response, 400\n response = {'status': 'Failed', 'message':\n 'Cannot send empty message'}\n return response, 400\n",
"<import token>\n\n\nclass Text(Resource):\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,423 |
99d04fb8ccd66a904ab86e6625a9d7d6c22db8da
|
import numpy as np
import scipy.signal as signal
import sounddevice as sd

from rtlsdr import RtlSdr

Fs = 1024000  # sample rate
f = 446006250  # center frequency (Hz), PMR446 channel 1
f_corr = 75  # frequency correction (ppm)
N = 8192000  # number of samples to read

# set up SDR
sdr = RtlSdr()

# configure device
sdr.sample_rate = Fs
sdr.center_freq = f
sdr.freq_correction = f_corr
sdr.gain = "auto"

# Read samples
print(10 * "=", f"Capturing started, freq: {f}", 10 * "=")
samples = sdr.read_samples(N)
print(10 * "=", "Capturing stopped", 10 * "=")

# Convert samples to a numpy array
x2 = np.array(samples).astype("complex64")

# This is narrowband FM, roughly 10 kHz wide - unlike the ~200 kHz of a
# broadcast FM signal (the original comment overstated the bandwidth)
f_bw = 10000
n_taps = 64
# Use Remez algorithm to design filter coefficients
lpf = signal.remez(n_taps, [0, f_bw, f_bw + (Fs / 2 - f_bw) / 4, Fs / 2], [1, 0], Hz=Fs)
x3 = signal.lfilter(lpf, 1.0, x2)

dec_rate = int(Fs / f_bw)
x4 = x3[0::dec_rate]
# Calculate the new sampling rate
Fs_y = Fs / dec_rate

### Polar discriminator
y5 = x4[1:] * np.conj(x4[:-1])
x5 = np.angle(y5)

# The de-emphasis filter
# Given a signal 'x5' (in a numpy array) with sampling rate Fs_y
d = Fs_y * 75e-6  # Calculate the # of samples to hit the -3dB point
x = np.exp(-1 / d)  # Calculate the decay between each sample
b = [1 - x]  # Create the filter coefficients
a = [1, -x]
x6 = signal.lfilter(b, a, x5)

audio_freq = 10000.0
dec_audio = int(Fs_y / audio_freq)
Fs_audio = int(Fs_y / dec_audio)

x7 = signal.decimate(x6, dec_audio)

# Scale audio to adjust volume; keep peaks inside the int16 range (+/-32767),
# since the original factor of 75000 wrapped around in the cast below
x7 *= 32000 / np.max(np.abs(x7))

# print sampling freq and save raw sound to file
# print(Fs_audio)
# # Save to file as 16-bit signed single-channel audio samples
# x7.astype("int16").tofile("wbfm-mono.raw")

# play sound
wav_wave = np.array(x7, dtype=np.int16)
sd.play(wav_wave, Fs_audio, blocking=True)
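
# A sketch (added; scipy.io.wavfile ships with the scipy already imported
# above) of writing the demodulated audio to a WAV file instead of playing it:
#
#   from scipy.io import wavfile
#   wavfile.write("nfm_demod.wav", Fs_audio, wav_wave)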
|
[
"import numpy as np\nimport scipy.signal as signal\nimport sounddevice as sd\n\nfrom rtlsdr import RtlSdr\n\nFs = 1024000 # sample rate\nf = 446006250 # center frequency (Hz)\nf_corr = 75 # frequency correction (ppm)\nN = 8192000 # number of samples to read\n\n# set up SDR\nsdr = RtlSdr()\n\n# configure device\nsdr.sample_rate = Fs\nsdr.center_freq = f\nsdr.freq_correction = f_corr\nsdr.gain = \"auto\"\n\n\n# Read samples\nprint(10 * \"=\", f\"Capturing started, freq: {f}\", 10 * \"=\")\nsamples = sdr.read_samples(N)\nprint(10 * \"=\", \"Capturing stopped\", 10 * \"=\")\n\n# Convert samples to a numpy array\nx2 = np.array(samples).astype(\"complex64\")\n\n# An FM broadcast signal has a bandwidth of 200 kHz\nf_bw = 10000\nn_taps = 64\n# Use Remez algorithm to design filter coefficients\nlpf = signal.remez(n_taps, [0, f_bw, f_bw + (Fs / 2 - f_bw) / 4, Fs / 2], [1, 0], Hz=Fs)\nx3 = signal.lfilter(lpf, 1.0, x2)\n\ndec_rate = int(Fs / f_bw)\nx4 = x3[0::dec_rate]\n# Calculate the new sampling rate\nFs_y = Fs / dec_rate\n\n### Polar discriminator\ny5 = x4[1:] * np.conj(x4[:-1])\nx5 = np.angle(y5)\n\n# The de-emphasis filter\n# Given a signal 'x5' (in a numpy array) with sampling rate Fs_y\nd = Fs_y * 75e-6 # Calculate the # of samples to hit the -3dB point\nx = np.exp(-1 / d) # Calculate the decay between each sample\nb = [1 - x] # Create the filter coefficients\na = [1, -x]\nx6 = signal.lfilter(b, a, x5)\n\naudio_freq = 10000.0\ndec_audio = int(Fs_y / audio_freq)\nFs_audio = int(Fs_y / dec_audio)\n\nx7 = signal.decimate(x6, dec_audio)\n\n# Scale audio to adjust volume\nx7 *= 75000 / np.max(np.abs(x7))\n\n# print sampling freq and save raw sound to file\n# print(Fs_audio)\n# # Save to file as 16-bit signed single-channel audio samples\n# x7.astype(\"int16\").tofile(\"wbfm-mono.raw\")\n\n# play sound\nwav_wave = np.array(x7, dtype=np.int16)\nsd.play(wav_wave, Fs_audio, blocking=True)\n",
"import numpy as np\nimport scipy.signal as signal\nimport sounddevice as sd\nfrom rtlsdr import RtlSdr\nFs = 1024000\nf = 446006250\nf_corr = 75\nN = 8192000\nsdr = RtlSdr()\nsdr.sample_rate = Fs\nsdr.center_freq = f\nsdr.freq_correction = f_corr\nsdr.gain = 'auto'\nprint(10 * '=', f'Capturing started, freq: {f}', 10 * '=')\nsamples = sdr.read_samples(N)\nprint(10 * '=', 'Capturing stopped', 10 * '=')\nx2 = np.array(samples).astype('complex64')\nf_bw = 10000\nn_taps = 64\nlpf = signal.remez(n_taps, [0, f_bw, f_bw + (Fs / 2 - f_bw) / 4, Fs / 2], [\n 1, 0], Hz=Fs)\nx3 = signal.lfilter(lpf, 1.0, x2)\ndec_rate = int(Fs / f_bw)\nx4 = x3[0::dec_rate]\nFs_y = Fs / dec_rate\ny5 = x4[1:] * np.conj(x4[:-1])\nx5 = np.angle(y5)\nd = Fs_y * 7.5e-05\nx = np.exp(-1 / d)\nb = [1 - x]\na = [1, -x]\nx6 = signal.lfilter(b, a, x5)\naudio_freq = 10000.0\ndec_audio = int(Fs_y / audio_freq)\nFs_audio = int(Fs_y / dec_audio)\nx7 = signal.decimate(x6, dec_audio)\nx7 *= 75000 / np.max(np.abs(x7))\nwav_wave = np.array(x7, dtype=np.int16)\nsd.play(wav_wave, Fs_audio, blocking=True)\n",
"<import token>\nFs = 1024000\nf = 446006250\nf_corr = 75\nN = 8192000\nsdr = RtlSdr()\nsdr.sample_rate = Fs\nsdr.center_freq = f\nsdr.freq_correction = f_corr\nsdr.gain = 'auto'\nprint(10 * '=', f'Capturing started, freq: {f}', 10 * '=')\nsamples = sdr.read_samples(N)\nprint(10 * '=', 'Capturing stopped', 10 * '=')\nx2 = np.array(samples).astype('complex64')\nf_bw = 10000\nn_taps = 64\nlpf = signal.remez(n_taps, [0, f_bw, f_bw + (Fs / 2 - f_bw) / 4, Fs / 2], [\n 1, 0], Hz=Fs)\nx3 = signal.lfilter(lpf, 1.0, x2)\ndec_rate = int(Fs / f_bw)\nx4 = x3[0::dec_rate]\nFs_y = Fs / dec_rate\ny5 = x4[1:] * np.conj(x4[:-1])\nx5 = np.angle(y5)\nd = Fs_y * 7.5e-05\nx = np.exp(-1 / d)\nb = [1 - x]\na = [1, -x]\nx6 = signal.lfilter(b, a, x5)\naudio_freq = 10000.0\ndec_audio = int(Fs_y / audio_freq)\nFs_audio = int(Fs_y / dec_audio)\nx7 = signal.decimate(x6, dec_audio)\nx7 *= 75000 / np.max(np.abs(x7))\nwav_wave = np.array(x7, dtype=np.int16)\nsd.play(wav_wave, Fs_audio, blocking=True)\n",
"<import token>\n<assignment token>\nprint(10 * '=', f'Capturing started, freq: {f}', 10 * '=')\n<assignment token>\nprint(10 * '=', 'Capturing stopped', 10 * '=')\n<assignment token>\nx7 *= 75000 / np.max(np.abs(x7))\n<assignment token>\nsd.play(wav_wave, Fs_audio, blocking=True)\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,424 |
11cd4abb1c65f39f15c803beadbf542e9298981d
|
from typing import Any, NamedTuple, Optional, Text

class _URLTuple(NamedTuple):
scheme: Any
netloc: Any
path: Any
query: Any
fragment: Any

class BaseURL(_URLTuple):
def replace(self, **kwargs): ...
@property
def host(self): ...
@property
def ascii_host(self): ...
@property
def port(self): ...
@property
def auth(self): ...
@property
def username(self): ...
@property
def raw_username(self): ...
@property
def password(self): ...
@property
def raw_password(self): ...
def decode_query(self, *args, **kwargs): ...
def join(self, *args, **kwargs): ...
def to_url(self): ...
def decode_netloc(self): ...
def to_uri_tuple(self): ...
def to_iri_tuple(self): ...
def get_file_location(self, pathformat: Optional[Any] = ...): ...

class URL(BaseURL):
def encode_netloc(self): ...
def encode(self, charset: Text = ..., errors: Text = ...): ...

class BytesURL(BaseURL):
def encode_netloc(self): ...
def decode(self, charset: Text = ..., errors: Text = ...): ...

def url_parse(url, scheme: Optional[Any] = ..., allow_fragments: bool = ...): ...
def url_quote(string, charset: Text = ..., errors: Text = ..., safe: str = ..., unsafe: str = ...): ...
def url_quote_plus(string, charset: Text = ..., errors: Text = ..., safe: str = ...): ...
def url_unparse(components): ...
def url_unquote(string, charset: Text = ..., errors: Text = ..., unsafe: str = ...): ...
def url_unquote_plus(s, charset: Text = ..., errors: Text = ...): ...
def url_fix(s, charset: Text = ...): ...
def uri_to_iri(uri, charset: Text = ..., errors: Text = ...): ...
def iri_to_uri(iri, charset: Text = ..., errors: Text = ..., safe_conversion: bool = ...): ...
def url_decode(s, charset: Text = ..., decode_keys: bool = ..., include_empty: bool = ..., errors: Text = ...,
separator: str = ..., cls: Optional[Any] = ...): ...
def url_decode_stream(stream, charset: Text = ..., decode_keys: bool = ..., include_empty: bool = ..., errors: Text = ...,
separator: str = ..., cls: Optional[Any] = ..., limit: Optional[Any] = ...,
return_iterator: bool = ...): ...
def url_encode(obj, charset: Text = ..., encode_keys: bool = ..., sort: bool = ..., key: Optional[Any] = ...,
separator: bytes = ...): ...
def url_encode_stream(obj, stream: Optional[Any] = ..., charset: Text = ..., encode_keys: bool = ..., sort: bool = ...,
key: Optional[Any] = ..., separator: bytes = ...): ...
def url_join(base, url, allow_fragments: bool = ...): ...

class Href:
base: Any
charset: Text
sort: Any
key: Any
def __init__(self, base: str = ..., charset: Text = ..., sort: bool = ..., key: Optional[Any] = ...): ...
def __getattr__(self, name): ...
def __call__(self, *path, **query): ...
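
# A usage sketch (added) against the runtime module these stubs describe,
# werkzeug.urls as of the 1.x API:
#
#   from werkzeug.urls import url_parse, url_join
#   u = url_parse("https://example.com/a/b?x=1")
#   u.host                                    # 'example.com'
#   u.query                                   # 'x=1'
#   url_join("https://example.com/a/", "c")   # 'https://example.com/a/c'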
|
[
"from typing import Any, NamedTuple, Optional, Text\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\nclass BaseURL(_URLTuple):\n def replace(self, **kwargs): ...\n @property\n def host(self): ...\n @property\n def ascii_host(self): ...\n @property\n def port(self): ...\n @property\n def auth(self): ...\n @property\n def username(self): ...\n @property\n def raw_username(self): ...\n @property\n def password(self): ...\n @property\n def raw_password(self): ...\n def decode_query(self, *args, **kwargs): ...\n def join(self, *args, **kwargs): ...\n def to_url(self): ...\n def decode_netloc(self): ...\n def to_uri_tuple(self): ...\n def to_iri_tuple(self): ...\n def get_file_location(self, pathformat: Optional[Any] = ...): ...\n\nclass URL(BaseURL):\n def encode_netloc(self): ...\n def encode(self, charset: Text = ..., errors: Text = ...): ...\n\nclass BytesURL(BaseURL):\n def encode_netloc(self): ...\n def decode(self, charset: Text = ..., errors: Text = ...): ...\n\ndef url_parse(url, scheme: Optional[Any] = ..., allow_fragments: bool = ...): ...\ndef url_quote(string, charset: Text = ..., errors: Text = ..., safe: str = ..., unsafe: str = ...): ...\ndef url_quote_plus(string, charset: Text = ..., errors: Text = ..., safe: str = ...): ...\ndef url_unparse(components): ...\ndef url_unquote(string, charset: Text = ..., errors: Text = ..., unsafe: str = ...): ...\ndef url_unquote_plus(s, charset: Text = ..., errors: Text = ...): ...\ndef url_fix(s, charset: Text = ...): ...\ndef uri_to_iri(uri, charset: Text = ..., errors: Text = ...): ...\ndef iri_to_uri(iri, charset: Text = ..., errors: Text = ..., safe_conversion: bool = ...): ...\ndef url_decode(s, charset: Text = ..., decode_keys: bool = ..., include_empty: bool = ..., errors: Text = ...,\n separator: str = ..., cls: Optional[Any] = ...): ...\ndef url_decode_stream(stream, charset: Text = ..., decode_keys: bool = ..., include_empty: bool = ..., errors: Text = ...,\n separator: str = ..., cls: Optional[Any] = ..., limit: Optional[Any] = ...,\n return_iterator: bool = ...): ...\ndef url_encode(obj, charset: Text = ..., encode_keys: bool = ..., sort: bool = ..., key: Optional[Any] = ...,\n separator: bytes = ...): ...\ndef url_encode_stream(obj, stream: Optional[Any] = ..., charset: Text = ..., encode_keys: bool = ..., sort: bool = ...,\n key: Optional[Any] = ..., separator: bytes = ...): ...\ndef url_join(base, url, allow_fragments: bool = ...): ...\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n def __init__(self, base: str = ..., charset: Text = ..., sort: bool = ..., key: Optional[Any] = ...): ...\n def __getattr__(self, name): ...\n def __call__(self, *path, **query): ...\n",
"from typing import Any, NamedTuple, Optional, Text\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\ndef url_parse(url, scheme: Optional[Any]=..., allow_fragments: bool=...):\n ...\n\n\ndef url_quote(string, charset: Text=..., errors: Text=..., safe: str=...,\n unsafe: str=...):\n ...\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\ndef url_unparse(components):\n ...\n\n\ndef url_unquote(string, charset: Text=..., errors: Text=..., unsafe: str=...):\n ...\n\n\ndef url_unquote_plus(s, charset: Text=..., errors: Text=...):\n ...\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\ndef uri_to_iri(uri, charset: Text=..., errors: Text=...):\n ...\n\n\ndef iri_to_uri(iri, charset: Text=..., errors: Text=..., safe_conversion:\n bool=...):\n ...\n\n\ndef url_decode(s, charset: Text=..., decode_keys: bool=..., include_empty:\n bool=..., errors: Text=..., separator: str=..., cls: Optional[Any]=...):\n ...\n\n\ndef url_decode_stream(stream, charset: Text=..., decode_keys: bool=...,\n include_empty: bool=..., errors: Text=..., separator: str=..., cls:\n Optional[Any]=..., limit: Optional[Any]=..., return_iterator: bool=...):\n ...\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\ndef url_join(base, url, allow_fragments: bool=...):\n ...\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\ndef url_parse(url, scheme: Optional[Any]=..., allow_fragments: bool=...):\n ...\n\n\ndef url_quote(string, charset: Text=..., errors: Text=..., safe: str=...,\n unsafe: str=...):\n ...\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\ndef url_unparse(components):\n ...\n\n\ndef url_unquote(string, charset: Text=..., errors: Text=..., unsafe: str=...):\n ...\n\n\ndef url_unquote_plus(s, charset: Text=..., errors: Text=...):\n ...\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\ndef uri_to_iri(uri, charset: Text=..., errors: Text=...):\n ...\n\n\ndef iri_to_uri(iri, charset: Text=..., errors: Text=..., safe_conversion:\n bool=...):\n ...\n\n\ndef url_decode(s, charset: Text=..., decode_keys: bool=..., include_empty:\n bool=..., errors: Text=..., separator: str=..., cls: Optional[Any]=...):\n ...\n\n\ndef url_decode_stream(stream, charset: Text=..., decode_keys: bool=...,\n include_empty: bool=..., errors: Text=..., separator: str=..., cls:\n Optional[Any]=..., limit: Optional[Any]=..., return_iterator: bool=...):\n ...\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\ndef url_join(base, url, allow_fragments: bool=...):\n ...\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\ndef url_parse(url, scheme: Optional[Any]=..., allow_fragments: bool=...):\n ...\n\n\ndef url_quote(string, charset: Text=..., errors: Text=..., safe: str=...,\n unsafe: str=...):\n ...\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\ndef url_unparse(components):\n ...\n\n\ndef url_unquote(string, charset: Text=..., errors: Text=..., unsafe: str=...):\n ...\n\n\ndef url_unquote_plus(s, charset: Text=..., errors: Text=...):\n ...\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\ndef uri_to_iri(uri, charset: Text=..., errors: Text=...):\n ...\n\n\ndef iri_to_uri(iri, charset: Text=..., errors: Text=..., safe_conversion:\n bool=...):\n ...\n\n\ndef url_decode(s, charset: Text=..., decode_keys: bool=..., include_empty:\n bool=..., errors: Text=..., separator: str=..., cls: Optional[Any]=...):\n ...\n\n\ndef url_decode_stream(stream, charset: Text=..., decode_keys: bool=...,\n include_empty: bool=..., errors: Text=..., separator: str=..., cls:\n Optional[Any]=..., limit: Optional[Any]=..., return_iterator: bool=...):\n ...\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\ndef url_parse(url, scheme: Optional[Any]=..., allow_fragments: bool=...):\n ...\n\n\ndef url_quote(string, charset: Text=..., errors: Text=..., safe: str=...,\n unsafe: str=...):\n ...\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\ndef url_unparse(components):\n ...\n\n\ndef url_unquote(string, charset: Text=..., errors: Text=..., unsafe: str=...):\n ...\n\n\n<function token>\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\ndef uri_to_iri(uri, charset: Text=..., errors: Text=...):\n ...\n\n\ndef iri_to_uri(iri, charset: Text=..., errors: Text=..., safe_conversion:\n bool=...):\n ...\n\n\ndef url_decode(s, charset: Text=..., decode_keys: bool=..., include_empty:\n bool=..., errors: Text=..., separator: str=..., cls: Optional[Any]=...):\n ...\n\n\ndef url_decode_stream(stream, charset: Text=..., decode_keys: bool=...,\n include_empty: bool=..., errors: Text=..., separator: str=..., cls:\n Optional[Any]=..., limit: Optional[Any]=..., return_iterator: bool=...):\n ...\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n\n\ndef url_quote(string, charset: Text=..., errors: Text=..., safe: str=...,\n unsafe: str=...):\n ...\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\ndef url_unparse(components):\n ...\n\n\ndef url_unquote(string, charset: Text=..., errors: Text=..., unsafe: str=...):\n ...\n\n\n<function token>\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\ndef uri_to_iri(uri, charset: Text=..., errors: Text=...):\n ...\n\n\ndef iri_to_uri(iri, charset: Text=..., errors: Text=..., safe_conversion:\n bool=...):\n ...\n\n\ndef url_decode(s, charset: Text=..., decode_keys: bool=..., include_empty:\n bool=..., errors: Text=..., separator: str=..., cls: Optional[Any]=...):\n ...\n\n\ndef url_decode_stream(stream, charset: Text=..., decode_keys: bool=...,\n include_empty: bool=..., errors: Text=..., separator: str=..., cls:\n Optional[Any]=..., limit: Optional[Any]=..., return_iterator: bool=...):\n ...\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n\n\ndef url_quote(string, charset: Text=..., errors: Text=..., safe: str=...,\n unsafe: str=...):\n ...\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\ndef url_unparse(components):\n ...\n\n\ndef url_unquote(string, charset: Text=..., errors: Text=..., unsafe: str=...):\n ...\n\n\n<function token>\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\ndef uri_to_iri(uri, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n\n\ndef url_decode(s, charset: Text=..., decode_keys: bool=..., include_empty:\n bool=..., errors: Text=..., separator: str=..., cls: Optional[Any]=...):\n ...\n\n\ndef url_decode_stream(stream, charset: Text=..., decode_keys: bool=...,\n include_empty: bool=..., errors: Text=..., separator: str=..., cls:\n Optional[Any]=..., limit: Optional[Any]=..., return_iterator: bool=...):\n ...\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n\n\ndef url_quote(string, charset: Text=..., errors: Text=..., safe: str=...,\n unsafe: str=...):\n ...\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\ndef url_unparse(components):\n ...\n\n\n<function token>\n<function token>\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\ndef uri_to_iri(uri, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n\n\ndef url_decode(s, charset: Text=..., decode_keys: bool=..., include_empty:\n bool=..., errors: Text=..., separator: str=..., cls: Optional[Any]=...):\n ...\n\n\ndef url_decode_stream(stream, charset: Text=..., decode_keys: bool=...,\n include_empty: bool=..., errors: Text=..., separator: str=..., cls:\n Optional[Any]=..., limit: Optional[Any]=..., return_iterator: bool=...):\n ...\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n\n\ndef url_quote(string, charset: Text=..., errors: Text=..., safe: str=...,\n unsafe: str=...):\n ...\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\ndef url_unparse(components):\n ...\n\n\n<function token>\n<function token>\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\ndef uri_to_iri(uri, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n\n\ndef url_decode(s, charset: Text=..., decode_keys: bool=..., include_empty:\n bool=..., errors: Text=..., separator: str=..., cls: Optional[Any]=...):\n ...\n\n\n<function token>\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\ndef url_unparse(components):\n ...\n\n\n<function token>\n<function token>\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\ndef uri_to_iri(uri, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n\n\ndef url_decode(s, charset: Text=..., decode_keys: bool=..., include_empty:\n bool=..., errors: Text=..., separator: str=..., cls: Optional[Any]=...):\n ...\n\n\n<function token>\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\ndef uri_to_iri(uri, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n\n\ndef url_decode(s, charset: Text=..., decode_keys: bool=..., include_empty:\n bool=..., errors: Text=..., separator: str=..., cls: Optional[Any]=...):\n ...\n\n\n<function token>\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\n<function token>\n<function token>\n\n\ndef url_decode(s, charset: Text=..., decode_keys: bool=..., include_empty:\n bool=..., errors: Text=..., separator: str=..., cls: Optional[Any]=...):\n ...\n\n\n<function token>\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef url_fix(s, charset: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef url_encode(obj, charset: Text=..., encode_keys: bool=..., sort: bool=\n ..., key: Optional[Any]=..., separator: bytes=...):\n ...\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef url_encode_stream(obj, stream: Optional[Any]=..., charset: Text=...,\n encode_keys: bool=..., sort: bool=..., key: Optional[Any]=...,\n separator: bytes=...):\n ...\n\n\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n\n\ndef url_quote_plus(string, charset: Text=..., errors: Text=..., safe: str=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n\n\nclass _URLTuple(NamedTuple):\n scheme: Any\n netloc: Any\n path: Any\n query: Any\n fragment: Any\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n\n def replace(self, **kwargs):\n ...\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n\n def to_iri_tuple(self):\n ...\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n <function token>\n\n def get_file_location(self, pathformat: Optional[Any]=...):\n ...\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n\n @property\n def port(self):\n ...\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n <function token>\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n\n def decode_query(self, *args, **kwargs):\n ...\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n <function token>\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n <function token>\n\n def join(self, *args, **kwargs):\n ...\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n\n @property\n def host(self):\n ...\n\n @property\n def ascii_host(self):\n ...\n <function token>\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n <function token>\n <function token>\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n\n @property\n def host(self):\n ...\n <function token>\n <function token>\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n <function token>\n <function token>\n\n def to_url(self):\n ...\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n\n @property\n def host(self):\n ...\n <function token>\n <function token>\n\n @property\n def auth(self):\n ...\n\n @property\n def username(self):\n ...\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n <function token>\n <function token>\n <function token>\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n\n @property\n def host(self):\n ...\n <function token>\n <function token>\n\n @property\n def auth(self):\n ...\n <function token>\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n <function token>\n <function token>\n <function token>\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def auth(self):\n ...\n <function token>\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n\n @property\n def raw_password(self):\n ...\n <function token>\n <function token>\n <function token>\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def auth(self):\n ...\n <function token>\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n <function token>\n <function token>\n <function token>\n <function token>\n\n def decode_netloc(self):\n ...\n\n def to_uri_tuple(self):\n ...\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def auth(self):\n ...\n <function token>\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n <function token>\n <function token>\n <function token>\n <function token>\n\n def decode_netloc(self):\n ...\n <function token>\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def raw_username(self):\n ...\n\n @property\n def password(self):\n ...\n <function token>\n <function token>\n <function token>\n <function token>\n\n def decode_netloc(self):\n ...\n <function token>\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def raw_username(self):\n ...\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def decode_netloc(self):\n ...\n <function token>\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def decode_netloc(self):\n ...\n <function token>\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n\n\nclass BaseURL(_URLTuple):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n<class token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def encode(self, charset: Text=..., errors: Text=...):\n ...\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n<class token>\n\n\nclass URL(BaseURL):\n\n def encode_netloc(self):\n ...\n <function token>\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n<class token>\n\n\nclass URL(BaseURL):\n <function token>\n <function token>\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass BytesURL(BaseURL):\n\n def encode_netloc(self):\n ...\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass BytesURL(BaseURL):\n <function token>\n\n def decode(self, charset: Text=..., errors: Text=...):\n ...\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass BytesURL(BaseURL):\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n\n def __call__(self, *path, **query):\n ...\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n\n def __init__(self, base: str=..., charset: Text=..., sort: bool=...,\n key: Optional[Any]=...):\n ...\n\n def __getattr__(self, name):\n ...\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n <function token>\n\n def __getattr__(self, name):\n ...\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Href:\n base: Any\n charset: Text\n sort: Any\n key: Any\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n"
] | false |
98,425 |
b1427055e5cab1b77860175874d643c7f932e7f0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''11.py
@author:cnfuyu
@date:2013-4-6
'''
from PIL import Image  # the bare `import Image` only resolves with very old PIL installs

if __name__ == '__main__':
    im = Image.open('./python_challenge_11.jpg')
    # Floor division keeps the size tuple integral (plain `/` breaks on Python 3).
    nim = Image.new(im.mode, (im.size[0] // 2, im.size[1] // 2))
    # Copy every second pixel (odd columns and rows) into the half-size image.
    for x in range(1, im.size[0], 2):
        for y in range(1, im.size[1], 2):
            nim.putpixel((x // 2, y // 2), im.getpixel((x, y)))
    nim.save('./python_challenge_11_result.png')
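
# A minimal alternative sketch (assumption: numpy is available alongside
# Pillow; this is not part of the original solution). Slicing with step 2
# pulls the same odd-indexed pixels in one operation instead of a per-pixel loop:
#
#   import numpy as np
#   arr = np.asarray(Image.open('./python_challenge_11.jpg'))
#   Image.fromarray(arr[1::2, 1::2]).save('./python_challenge_11_result.png')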
|
[
"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n'''11.py\n@author:cnfuyu\n@date:2013-4-6\n'''\n\nimport Image, ImageDraw\n\nif __name__ == '__main__':\n im = Image.open('./python_challenge_11.jpg')\n nim = Image.new(im.mode, (im.size[0] / 2, im.size[1] / 2) )\n \n for x in range(1, im.size[0], 2):\n for y in range(1, im.size[1], 2):\n nim.putpixel( (x // 2, y // 2), im.getpixel( (x, y) ) ) \n\n nim.save('./python_challenge_11_result.png')\n",
"<docstring token>\nimport Image, ImageDraw\nif __name__ == '__main__':\n im = Image.open('./python_challenge_11.jpg')\n nim = Image.new(im.mode, (im.size[0] / 2, im.size[1] / 2))\n for x in range(1, im.size[0], 2):\n for y in range(1, im.size[1], 2):\n nim.putpixel((x // 2, y // 2), im.getpixel((x, y)))\n nim.save('./python_challenge_11_result.png')\n",
"<docstring token>\n<import token>\nif __name__ == '__main__':\n im = Image.open('./python_challenge_11.jpg')\n nim = Image.new(im.mode, (im.size[0] / 2, im.size[1] / 2))\n for x in range(1, im.size[0], 2):\n for y in range(1, im.size[1], 2):\n nim.putpixel((x // 2, y // 2), im.getpixel((x, y)))\n nim.save('./python_challenge_11_result.png')\n",
"<docstring token>\n<import token>\n<code token>\n"
] | false |
98,426 |
a604344b460218fc752ab34c0f8c158e3ccfd1c5
|
import os
import rospy
import numpy as np
def publish_trajectory(filename, param_name):
    # Load a CSV of [t, pos(x,y,z), vel(x,y,z), acc(x,y,z)] rows and push it
    # to the ROS parameter server as a nested dictionary.
    filename_ = os.getcwd() + '/' + filename
    trajectory = np.genfromtxt(filename_, delimiter=',')
    trajectory_dict = {'t': trajectory[:, 0].tolist(),
                       'pos': {'x': trajectory[:, 1].tolist(),
                               'y': trajectory[:, 2].tolist(),
                               'z': trajectory[:, 3].tolist()},
                       'vel': {'x': trajectory[:, 4].tolist(),
                               'y': trajectory[:, 5].tolist(),
                               'z': trajectory[:, 6].tolist()},
                       'acc': {'x': trajectory[:, 7].tolist(),
                               'y': trajectory[:, 8].tolist(),
                               'z': trajectory[:, 9].tolist()}}
    rospy.set_param('/' + param_name, trajectory_dict)


def publish_zmp_trajectory(filename, param_name):
    # ZMP CSV rows are [t, x, y, z]; published as a flat dictionary.
    filename_ = os.getcwd() + '/' + filename
    trajectory = np.genfromtxt(filename_, delimiter=',')
    trajectory_dict = {'t': trajectory[:, 0].tolist(),
                       'x': trajectory[:, 1].tolist(),
                       'y': trajectory[:, 2].tolist(),
                       'z': trajectory[:, 3].tolist()}
    rospy.set_param('/' + param_name, trajectory_dict)


def publish_support_durations(filename, param_name):
    # One duration per support phase, published as a plain list.
    filename_ = os.getcwd() + '/' + filename
    support_durations = np.genfromtxt(filename_, delimiter=',')
    rospy.set_param('/' + param_name, support_durations.tolist())


def publish_support_end_times(filename, param_name):
    # The cumulative sum of the phase durations gives each phase's end time.
    filename_ = os.getcwd() + '/' + filename
    support_durations = np.genfromtxt(filename_, delimiter=',')
    support_end_times = np.cumsum(support_durations)
    rospy.set_param('/' + param_name, support_end_times.tolist())


def publish_support_indexes(filename, param_name):
    filename_ = os.getcwd() + '/' + filename
    support_indexes = np.genfromtxt(filename_, delimiter=',')
    rospy.set_param('/' + param_name, support_indexes.tolist())


def publish_all():
    # Publish every trajectory-related parameter once, then exit; the loop
    # only guards against running while the node is shutting down.
    try:
        while not rospy.is_shutdown():
            publish_trajectory(filename='com_trajectory.csv', param_name='com_trajectory')
            publish_zmp_trajectory(filename='zmp_trajectory.csv', param_name='zmp_trajectory')
            publish_support_durations(filename='support_durations.csv', param_name='support_durations')
            publish_support_end_times(filename='support_durations.csv', param_name='support_end_times')
            publish_support_indexes(filename='support_indexes.csv', param_name='support_indexes')
            break
    except rospy.ROSInterruptException:
        pass


if __name__ == "__main__":
    publish_all()
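
# A minimal consumer sketch (hypothetical, not part of the original node):
# any other rospy node can read the published dictionaries back, e.g.
#
#   com = rospy.get_param('/com_trajectory')
#   t, x = com['t'], com['pos']['x']   # parallel lists, one entry per CSV row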
|
[
"import os\nimport rospy\nimport numpy as np\n\n\ndef publish_trajectory(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n trajectory = np.genfromtxt(filename_, delimiter=',')\n trajectory_dict={'t':trajectory[:,0].tolist(),\n 'pos':{'x':trajectory[:,1].tolist(),\n 'y':trajectory[:,2].tolist(),\n 'z':trajectory[:,3].tolist()},\n 'vel':{'x':trajectory[:,4].tolist(),\n 'y':trajectory[:,5].tolist(),\n 'z':trajectory[:,6].tolist()},\n 'acc':{'x':trajectory[:,7].tolist(),\n 'y':trajectory[:,8].tolist(),\n 'z':trajectory[:,9].tolist()}}\n rospy.set_param('/'+param_name, trajectory_dict)\n\ndef publish_zmp_trajectory(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n trajectory = np.genfromtxt(filename_, delimiter=',')\n trajectory_dict={'t':trajectory[:,0].tolist(),\n 'x':trajectory[:,1].tolist(),\n 'y':trajectory[:,2].tolist(),\n 'z':trajectory[:,3].tolist()}\n rospy.set_param('/'+param_name, trajectory_dict)\n\ndef publish_support_durations(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n rospy.set_param('/'+param_name, support_durations.tolist())\n\ndef publish_support_end_times(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n support_end_times = np.cumsum(support_durations)\n rospy.set_param('/'+param_name, support_end_times.tolist())\n\ndef publish_support_indexes(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_indexes = np.genfromtxt(filename_, delimiter=',')\n rospy.set_param('/'+param_name, support_indexes.tolist())\n\n\n\ndef publish_all():\n try:\n while not rospy.is_shutdown():\n publish_trajectory(filename='com_trajectory.csv', param_name='com_trajectory')\n publish_zmp_trajectory(filename='zmp_trajectory.csv', param_name='zmp_trajectory')\n publish_support_durations(filename='support_durations.csv', param_name='support_durations')\n publish_support_end_times(filename='support_durations.csv', param_name='support_end_times')\n publish_support_indexes(filename='support_indexes.csv', param_name='support_indexes')\n\n break\n except rospy.ROSInterruptException: pass\n\nif __name__ == \"__main__\":\n publish_all()",
"import os\nimport rospy\nimport numpy as np\n\n\ndef publish_trajectory(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n trajectory = np.genfromtxt(filename_, delimiter=',')\n trajectory_dict = {'t': trajectory[:, 0].tolist(), 'pos': {'x':\n trajectory[:, 1].tolist(), 'y': trajectory[:, 2].tolist(), 'z':\n trajectory[:, 3].tolist()}, 'vel': {'x': trajectory[:, 4].tolist(),\n 'y': trajectory[:, 5].tolist(), 'z': trajectory[:, 6].tolist()},\n 'acc': {'x': trajectory[:, 7].tolist(), 'y': trajectory[:, 8].\n tolist(), 'z': trajectory[:, 9].tolist()}}\n rospy.set_param('/' + param_name, trajectory_dict)\n\n\ndef publish_zmp_trajectory(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n trajectory = np.genfromtxt(filename_, delimiter=',')\n trajectory_dict = {'t': trajectory[:, 0].tolist(), 'x': trajectory[:, 1\n ].tolist(), 'y': trajectory[:, 2].tolist(), 'z': trajectory[:, 3].\n tolist()}\n rospy.set_param('/' + param_name, trajectory_dict)\n\n\ndef publish_support_durations(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n rospy.set_param('/' + param_name, support_durations.tolist())\n\n\ndef publish_support_end_times(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n support_end_times = np.cumsum(support_durations)\n rospy.set_param('/' + param_name, support_end_times.tolist())\n\n\ndef publish_support_indexes(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_indexes = np.genfromtxt(filename_, delimiter=',')\n rospy.set_param('/' + param_name, support_indexes.tolist())\n\n\ndef publish_all():\n try:\n while not rospy.is_shutdown():\n publish_trajectory(filename='com_trajectory.csv', param_name=\n 'com_trajectory')\n publish_zmp_trajectory(filename='zmp_trajectory.csv',\n param_name='zmp_trajectory')\n publish_support_durations(filename='support_durations.csv',\n param_name='support_durations')\n publish_support_end_times(filename='support_durations.csv',\n param_name='support_end_times')\n publish_support_indexes(filename='support_indexes.csv',\n param_name='support_indexes')\n break\n except rospy.ROSInterruptException:\n pass\n\n\nif __name__ == '__main__':\n publish_all()\n",
"<import token>\n\n\ndef publish_trajectory(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n trajectory = np.genfromtxt(filename_, delimiter=',')\n trajectory_dict = {'t': trajectory[:, 0].tolist(), 'pos': {'x':\n trajectory[:, 1].tolist(), 'y': trajectory[:, 2].tolist(), 'z':\n trajectory[:, 3].tolist()}, 'vel': {'x': trajectory[:, 4].tolist(),\n 'y': trajectory[:, 5].tolist(), 'z': trajectory[:, 6].tolist()},\n 'acc': {'x': trajectory[:, 7].tolist(), 'y': trajectory[:, 8].\n tolist(), 'z': trajectory[:, 9].tolist()}}\n rospy.set_param('/' + param_name, trajectory_dict)\n\n\ndef publish_zmp_trajectory(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n trajectory = np.genfromtxt(filename_, delimiter=',')\n trajectory_dict = {'t': trajectory[:, 0].tolist(), 'x': trajectory[:, 1\n ].tolist(), 'y': trajectory[:, 2].tolist(), 'z': trajectory[:, 3].\n tolist()}\n rospy.set_param('/' + param_name, trajectory_dict)\n\n\ndef publish_support_durations(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n rospy.set_param('/' + param_name, support_durations.tolist())\n\n\ndef publish_support_end_times(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n support_end_times = np.cumsum(support_durations)\n rospy.set_param('/' + param_name, support_end_times.tolist())\n\n\ndef publish_support_indexes(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_indexes = np.genfromtxt(filename_, delimiter=',')\n rospy.set_param('/' + param_name, support_indexes.tolist())\n\n\ndef publish_all():\n try:\n while not rospy.is_shutdown():\n publish_trajectory(filename='com_trajectory.csv', param_name=\n 'com_trajectory')\n publish_zmp_trajectory(filename='zmp_trajectory.csv',\n param_name='zmp_trajectory')\n publish_support_durations(filename='support_durations.csv',\n param_name='support_durations')\n publish_support_end_times(filename='support_durations.csv',\n param_name='support_end_times')\n publish_support_indexes(filename='support_indexes.csv',\n param_name='support_indexes')\n break\n except rospy.ROSInterruptException:\n pass\n\n\nif __name__ == '__main__':\n publish_all()\n",
"<import token>\n\n\ndef publish_trajectory(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n trajectory = np.genfromtxt(filename_, delimiter=',')\n trajectory_dict = {'t': trajectory[:, 0].tolist(), 'pos': {'x':\n trajectory[:, 1].tolist(), 'y': trajectory[:, 2].tolist(), 'z':\n trajectory[:, 3].tolist()}, 'vel': {'x': trajectory[:, 4].tolist(),\n 'y': trajectory[:, 5].tolist(), 'z': trajectory[:, 6].tolist()},\n 'acc': {'x': trajectory[:, 7].tolist(), 'y': trajectory[:, 8].\n tolist(), 'z': trajectory[:, 9].tolist()}}\n rospy.set_param('/' + param_name, trajectory_dict)\n\n\ndef publish_zmp_trajectory(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n trajectory = np.genfromtxt(filename_, delimiter=',')\n trajectory_dict = {'t': trajectory[:, 0].tolist(), 'x': trajectory[:, 1\n ].tolist(), 'y': trajectory[:, 2].tolist(), 'z': trajectory[:, 3].\n tolist()}\n rospy.set_param('/' + param_name, trajectory_dict)\n\n\ndef publish_support_durations(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n rospy.set_param('/' + param_name, support_durations.tolist())\n\n\ndef publish_support_end_times(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n support_end_times = np.cumsum(support_durations)\n rospy.set_param('/' + param_name, support_end_times.tolist())\n\n\ndef publish_support_indexes(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_indexes = np.genfromtxt(filename_, delimiter=',')\n rospy.set_param('/' + param_name, support_indexes.tolist())\n\n\ndef publish_all():\n try:\n while not rospy.is_shutdown():\n publish_trajectory(filename='com_trajectory.csv', param_name=\n 'com_trajectory')\n publish_zmp_trajectory(filename='zmp_trajectory.csv',\n param_name='zmp_trajectory')\n publish_support_durations(filename='support_durations.csv',\n param_name='support_durations')\n publish_support_end_times(filename='support_durations.csv',\n param_name='support_end_times')\n publish_support_indexes(filename='support_indexes.csv',\n param_name='support_indexes')\n break\n except rospy.ROSInterruptException:\n pass\n\n\n<code token>\n",
"<import token>\n\n\ndef publish_trajectory(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n trajectory = np.genfromtxt(filename_, delimiter=',')\n trajectory_dict = {'t': trajectory[:, 0].tolist(), 'pos': {'x':\n trajectory[:, 1].tolist(), 'y': trajectory[:, 2].tolist(), 'z':\n trajectory[:, 3].tolist()}, 'vel': {'x': trajectory[:, 4].tolist(),\n 'y': trajectory[:, 5].tolist(), 'z': trajectory[:, 6].tolist()},\n 'acc': {'x': trajectory[:, 7].tolist(), 'y': trajectory[:, 8].\n tolist(), 'z': trajectory[:, 9].tolist()}}\n rospy.set_param('/' + param_name, trajectory_dict)\n\n\ndef publish_zmp_trajectory(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n trajectory = np.genfromtxt(filename_, delimiter=',')\n trajectory_dict = {'t': trajectory[:, 0].tolist(), 'x': trajectory[:, 1\n ].tolist(), 'y': trajectory[:, 2].tolist(), 'z': trajectory[:, 3].\n tolist()}\n rospy.set_param('/' + param_name, trajectory_dict)\n\n\n<function token>\n\n\ndef publish_support_end_times(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n support_end_times = np.cumsum(support_durations)\n rospy.set_param('/' + param_name, support_end_times.tolist())\n\n\ndef publish_support_indexes(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_indexes = np.genfromtxt(filename_, delimiter=',')\n rospy.set_param('/' + param_name, support_indexes.tolist())\n\n\ndef publish_all():\n try:\n while not rospy.is_shutdown():\n publish_trajectory(filename='com_trajectory.csv', param_name=\n 'com_trajectory')\n publish_zmp_trajectory(filename='zmp_trajectory.csv',\n param_name='zmp_trajectory')\n publish_support_durations(filename='support_durations.csv',\n param_name='support_durations')\n publish_support_end_times(filename='support_durations.csv',\n param_name='support_end_times')\n publish_support_indexes(filename='support_indexes.csv',\n param_name='support_indexes')\n break\n except rospy.ROSInterruptException:\n pass\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef publish_zmp_trajectory(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n trajectory = np.genfromtxt(filename_, delimiter=',')\n trajectory_dict = {'t': trajectory[:, 0].tolist(), 'x': trajectory[:, 1\n ].tolist(), 'y': trajectory[:, 2].tolist(), 'z': trajectory[:, 3].\n tolist()}\n rospy.set_param('/' + param_name, trajectory_dict)\n\n\n<function token>\n\n\ndef publish_support_end_times(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n support_end_times = np.cumsum(support_durations)\n rospy.set_param('/' + param_name, support_end_times.tolist())\n\n\ndef publish_support_indexes(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_indexes = np.genfromtxt(filename_, delimiter=',')\n rospy.set_param('/' + param_name, support_indexes.tolist())\n\n\ndef publish_all():\n try:\n while not rospy.is_shutdown():\n publish_trajectory(filename='com_trajectory.csv', param_name=\n 'com_trajectory')\n publish_zmp_trajectory(filename='zmp_trajectory.csv',\n param_name='zmp_trajectory')\n publish_support_durations(filename='support_durations.csv',\n param_name='support_durations')\n publish_support_end_times(filename='support_durations.csv',\n param_name='support_end_times')\n publish_support_indexes(filename='support_indexes.csv',\n param_name='support_indexes')\n break\n except rospy.ROSInterruptException:\n pass\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef publish_support_end_times(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n support_end_times = np.cumsum(support_durations)\n rospy.set_param('/' + param_name, support_end_times.tolist())\n\n\ndef publish_support_indexes(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_indexes = np.genfromtxt(filename_, delimiter=',')\n rospy.set_param('/' + param_name, support_indexes.tolist())\n\n\ndef publish_all():\n try:\n while not rospy.is_shutdown():\n publish_trajectory(filename='com_trajectory.csv', param_name=\n 'com_trajectory')\n publish_zmp_trajectory(filename='zmp_trajectory.csv',\n param_name='zmp_trajectory')\n publish_support_durations(filename='support_durations.csv',\n param_name='support_durations')\n publish_support_end_times(filename='support_durations.csv',\n param_name='support_end_times')\n publish_support_indexes(filename='support_indexes.csv',\n param_name='support_indexes')\n break\n except rospy.ROSInterruptException:\n pass\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef publish_support_end_times(filename, param_name):\n filename_ = os.getcwd() + '/' + filename\n support_durations = np.genfromtxt(filename_, delimiter=',')\n support_end_times = np.cumsum(support_durations)\n rospy.set_param('/' + param_name, support_end_times.tolist())\n\n\n<function token>\n\n\ndef publish_all():\n try:\n while not rospy.is_shutdown():\n publish_trajectory(filename='com_trajectory.csv', param_name=\n 'com_trajectory')\n publish_zmp_trajectory(filename='zmp_trajectory.csv',\n param_name='zmp_trajectory')\n publish_support_durations(filename='support_durations.csv',\n param_name='support_durations')\n publish_support_end_times(filename='support_durations.csv',\n param_name='support_end_times')\n publish_support_indexes(filename='support_indexes.csv',\n param_name='support_indexes')\n break\n except rospy.ROSInterruptException:\n pass\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef publish_all():\n try:\n while not rospy.is_shutdown():\n publish_trajectory(filename='com_trajectory.csv', param_name=\n 'com_trajectory')\n publish_zmp_trajectory(filename='zmp_trajectory.csv',\n param_name='zmp_trajectory')\n publish_support_durations(filename='support_durations.csv',\n param_name='support_durations')\n publish_support_end_times(filename='support_durations.csv',\n param_name='support_end_times')\n publish_support_indexes(filename='support_indexes.csv',\n param_name='support_indexes')\n break\n except rospy.ROSInterruptException:\n pass\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,427 |
6e08110b24fe5602312123563818a7cda0380203
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests metrics correctness using Keras model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops.losses import loss_reduction
from tensorflow.python.platform import test
def get_multi_io_model():
inp_1 = layers.Input(shape=(1,), name='input_1')
inp_2 = layers.Input(shape=(1,), name='input_2')
x = layers.Dense(3, kernel_initializer='ones', trainable=False)
out_1 = layers.Dense(
1, kernel_initializer='ones', name='output_1', trainable=False)
out_2 = layers.Dense(
1, kernel_initializer='ones', name='output_2', trainable=False)
branch_a = [inp_1, x, out_1]
branch_b = [inp_2, x, out_2]
return testing_utils.get_multi_io_model(branch_a, branch_b)
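# Illustrative sketch (hypothetical helper, not part of the original test
# file): every kernel above is initialized to ones, biases default to zeros,
# and nothing is trainable, so the hidden Dense(3) maps x -> [x, x, x] and
# each output Dense(1) sums those -- both outputs predict 3 * x. This is the
# closed-form prediction the expected-value comments below rely on.
def _frozen_model_prediction(x):
  return 3. * np.asarray(x)  # [[1.], [2.], [3.], [4.]] -> [[3.], [6.], [9.], [12.]]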
def custom_generator_multi_io(sample_weights=None):
batch_size = 2
num_samples = 4
inputs = np.asarray([[1.], [2.], [3.], [4.]])
targets_1 = np.asarray([[2.], [4.], [6.], [8.]])
targets_2 = np.asarray([[1.], [2.], [3.], [4.]])
if sample_weights:
assert len(sample_weights) == 2
w1 = sample_weights[0]
w2 = sample_weights[1]
else:
w1 = None
w2 = None
i = 0
while True:
batch_index = i * batch_size % num_samples
i += 1
start = batch_index
end = start + batch_size
x = [inputs[start:end], inputs[start:end]]
y = [targets_1[start:end], targets_2[start:end]]
if sample_weights:
w = [
None if w1 is None else w1[start:end],
None if w2 is None else w2[start:end]
]
else:
w = None
yield x, y, w
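# Illustrative sketch (hypothetical helper, not in the original file): the
# generator cycles over the four samples two at a time, so two steps cover
# one pass over the data -- hence steps_per_epoch=2 in the generator tests
# below.
def _peek_multi_io_batch():
  gen = custom_generator_multi_io()
  x, y, w = next(gen)
  # x == [inputs[0:2], inputs[0:2]], y == [targets_1[0:2], targets_2[0:2]],
  # and w is None because no sample_weights were passed.
  return x, y, w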
@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes
class TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):
def _get_compiled_multi_io_model(self):
model = get_multi_io_model()
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
weighted_metrics=[
metrics.MeanSquaredError(name='mean_squared_error_2')
],
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def setUp(self):
super(TestMetricsCorrectnessMultiIO, self).setUp()
self.x = np.asarray([[1.], [2.], [3.], [4.]])
self.y1 = np.asarray([[2.], [4.], [6.], [8.]])
self.y2 = np.asarray([[1.], [2.], [3.], [4.]])
self.sample_weight_1 = np.asarray([2., 3., 4., 5.])
self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])
self.class_weight_1 = {2: 2, 4: 3, 6: 4, 8: 5}
self.class_weight_2 = {1: 3.5, 2: 2.5, 3: 1.5, 4: 0.5}
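    # Note: the class weights are keyed by the target values, so they
    # reproduce self.sample_weight_1 / self.sample_weight_2 exactly; this is
    # why the class-weight tests below reuse the same expected results as the
    # sample-weight tests.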
# y_true_1 = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]
# y_true_2 = [[1.], [2.], [3.], [4.]], y_pred = [[3.], [6.], [9.], [12.]]
# Weighted metric `output_1`:
# Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +
# ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
# = 130
# Count = (2 + 3) + (4 + 5)
# Result = 9.2857141
# Weighted metric `output_2`:
# Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) +
# ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5)
# = 140
# Count = (3.5 + 2.5) + (1.5 + 0.5)
# Result = 17.5
# Loss `output_1` with weights:
# Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +
# ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
# = 130
# Count = 2 + 2
# Result = 32.5
# Loss `output_1` without weights/Metric `output_1`:
# Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) = 30
# Count = 2 + 2
# Result = 7.5
# Loss `output_2` with weights:
# Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) +
# ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5)
# = 140
# Count = 2 + 2
# Result = 35
# Loss `output_2` without weights/Metric `output_2`:
# Total = ((3 - 1)^2 + (6 - 2)^2) + ((9 - 3)^2 + (12 - 4)^2) = 120
# Count = 2 + 2
# Result = 30
# Total loss with weights = 32.5 + 35 = 67.5
# Total loss without weights = 7.5 + 30 = 37.5
self.wmse = 'mean_squared_error_2'
if not tf2.enabled():
self.wmse = 'weighted_' + self.wmse
self.expected_fit_result_with_weights = {
'output_1_mean_squared_error': [7.5, 7.5],
'output_2_mean_squared_error': [30, 30],
'output_1_' + self.wmse: [9.286, 9.286],
'output_2_' + self.wmse: [17.5, 17.5],
'loss': [67.5, 67.5],
'output_1_loss': [32.5, 32.5],
'output_2_loss': [35, 35],
}
self.expected_fit_result_with_weights_output_2 = {
'output_1_mean_squared_error': [7.5, 7.5],
'output_2_mean_squared_error': [30, 30],
'output_1_' + self.wmse: [7.5, 7.5],
'output_2_' + self.wmse: [17.5, 17.5],
'loss': [42.5, 42.5],
'output_1_loss': [7.5, 7.5],
'output_2_loss': [35, 35],
}
self.expected_fit_result = {
'output_1_mean_squared_error': [7.5, 7.5],
'output_2_mean_squared_error': [30, 30],
'output_1_' + self.wmse: [7.5, 7.5],
'output_2_' + self.wmse: [30, 30],
'loss': [37.5, 37.5],
'output_1_loss': [7.5, 7.5],
'output_2_loss': [30, 30],
}
# In the order: 'loss', 'output_1_loss', 'output_2_loss',
# 'output_1_mean_squared_error', 'output_1_mean_squared_error_2',
# 'output_2_mean_squared_error', 'output_2_mean_squared_error_2'
self.expected_batch_result_with_weights = [
67.5, 32.5, 35, 7.5, 9.286, 30, 17.5
]
self.expected_batch_result_with_weights_output_2 = [
42.5, 7.5, 35, 7.5, 7.5, 30, 17.5
]
self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]
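  # Illustrative sketch (hypothetical helper, not part of the original
  # suite): the weighted *metric* divides the weighted total by the sum of
  # the weights (130 / 14 ~= 9.286), while the weighted *loss* divides by the
  # number of samples (130 / 4 = 32.5). This reproduces both denominators.
  def _weighted_totals_by_hand(self, y_true, y_pred, weights):
    sq = (np.asarray(y_pred) - np.asarray(y_true)).ravel() ** 2
    w = np.asarray(weights)
    weighted_total = np.sum(sq * w)
    return weighted_total / np.sum(w), weighted_total / len(w)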
def test_fit(self):
model = self._get_compiled_multi_io_model()
history = model.fit([self.x, self.x], [self.y1, self.y2],
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_fit_with_sample_weight(self):
model = self._get_compiled_multi_io_model()
history = model.fit([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
},
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result_with_weights.items():
self.assertAllClose(history.history[key], value, 1e-3)
    # Set sample weights for only one output (same batch size as above).
history = model.fit([self.x, self.x], [self.y1, self.y2],
sample_weight={'output_2': self.sample_weight_2},
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result_with_weights_output_2.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_fit_with_class_weight(self):
model = self._get_compiled_multi_io_model()
history = model.fit([self.x, self.x], [self.y1, self.y2],
class_weight={
'output_1': self.class_weight_1,
'output_2': self.class_weight_2,
},
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result_with_weights.items():
self.assertAllClose(history.history[key], value, 1e-3)
# Set weights for one output.
history = model.fit([self.x, self.x], [self.y1, self.y2],
class_weight={'output_2': self.class_weight_2},
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result_with_weights_output_2.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_eval(self):
model = self._get_compiled_multi_io_model()
eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
batch_size=2)
self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
def test_eval_with_sample_weight(self):
model = self._get_compiled_multi_io_model()
eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
batch_size=2,
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
1e-3)
# Set weights for one output.
model = self._get_compiled_multi_io_model()
eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
batch_size=2,
sample_weight={
'output_2': self.sample_weight_2,
})
self.assertAllClose(eval_result,
self.expected_batch_result_with_weights_output_2, 1e-3)
    # Verify that the metric value is the same for arbitrary weights and
    # batch sizes.
x = np.random.random((50, 1))
y = np.random.random((50, 1))
w = np.random.random((50,))
mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w], batch_size=5)[3]
mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],
batch_size=10)[3]
self.assertAllClose(mse1, mse2, 1e-3)
def test_train_on_batch(self):
model = self._get_compiled_multi_io_model()
result = model.train_on_batch([self.x, self.x], [self.y1, self.y2])
self.assertAllClose(result, self.expected_batch_result, 1e-3)
def test_train_on_batch_with_sample_weight(self):
model = self._get_compiled_multi_io_model()
result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
# Set weights for one output.
result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_2': self.sample_weight_2,
})
self.assertAllClose(result,
self.expected_batch_result_with_weights_output_2, 1e-3)
def test_train_on_batch_with_class_weight(self):
model = self._get_compiled_multi_io_model()
result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
class_weight={
'output_1': self.class_weight_1,
'output_2': self.class_weight_2,
})
self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
# Set weights for one output.
result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
class_weight={
'output_2': self.class_weight_2,
})
self.assertAllClose(result,
self.expected_batch_result_with_weights_output_2, 1e-3)
def test_test_on_batch(self):
model = self._get_compiled_multi_io_model()
result = model.test_on_batch([self.x, self.x], [self.y1, self.y2])
self.assertAllClose(result, self.expected_batch_result, 1e-3)
def test_test_on_batch_with_sample_weight(self):
model = self._get_compiled_multi_io_model()
result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
# Set weights for one output.
result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_2': self.sample_weight_2,
})
self.assertAllClose(result,
self.expected_batch_result_with_weights_output_2, 1e-3)
def test_fit_generator(self):
model = self._get_compiled_multi_io_model()
history = model.fit_generator(
custom_generator_multi_io(), steps_per_epoch=2, epochs=2)
for key, value in self.expected_fit_result.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_fit_generator_with_sample_weight(self):
model = self._get_compiled_multi_io_model()
history = model.fit_generator(
custom_generator_multi_io(
sample_weights=[self.sample_weight_1, self.sample_weight_2]),
steps_per_epoch=2,
epochs=2)
for key, value in self.expected_fit_result_with_weights.items():
self.assertAllClose(history.history[key], value, 1e-3)
# Set weights for one output.
history = model.fit_generator(
custom_generator_multi_io(sample_weights=[None, self.sample_weight_2]),
steps_per_epoch=2,
epochs=2)
for key, value in self.expected_fit_result_with_weights_output_2.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_fit_generator_with_class_weight(self):
model = self._get_compiled_multi_io_model()
history = model.fit_generator(
custom_generator_multi_io(),
class_weight={
'output_1': self.class_weight_1,
'output_2': self.class_weight_2,
},
steps_per_epoch=2,
epochs=2)
for key, value in self.expected_fit_result_with_weights.items():
self.assertAllClose(history.history[key], value, 1e-3)
# Set weights for one output.
history = model.fit_generator(
custom_generator_multi_io(),
class_weight={'output_2': self.class_weight_2},
steps_per_epoch=2,
epochs=2)
for key, value in self.expected_fit_result_with_weights_output_2.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_eval_generator(self):
model = self._get_compiled_multi_io_model()
eval_result = model.evaluate_generator(custom_generator_multi_io(), steps=2)
self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
def test_eval_generator_with_sample_weight(self):
model = self._get_compiled_multi_io_model()
eval_result = model.evaluate_generator(
custom_generator_multi_io(
sample_weights=[self.sample_weight_1, self.sample_weight_2]),
steps=2)
self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
1e-3)
# Set weights for one output.
eval_result = model.evaluate_generator(
custom_generator_multi_io(sample_weights=[None, self.sample_weight_2]),
steps=2)
self.assertAllClose(eval_result,
self.expected_batch_result_with_weights_output_2, 1e-3)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):
def _get_model(self):
x = layers.Dense(3, kernel_initializer='ones', trainable=False)
out = layers.Dense(
1, kernel_initializer='ones', name='output', trainable=False)
model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
weighted_metrics=[
metrics.MeanSquaredError(name='mean_squared_error_2')
],
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def _custom_generator(self, sample_weight=None):
batch_size = 2
num_samples = 4
x = np.asarray([[1.], [2.], [3.], [4.]])
y = np.asarray([[2.], [4.], [6.], [8.]])
w = sample_weight
i = 0
while True:
batch_index = i * batch_size % num_samples
i += 1
start = batch_index
end = start + batch_size
yield x[start:end], y[start:end], None if w is None else w[start:end]
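  # Note: batch_index cycles 0, 2, 0, 2, ... for batch_size=2 and
  # num_samples=4, so the generator repeats the same two batches indefinitely
  # and two steps make one epoch.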
def setUp(self):
super(TestMetricsCorrectnessSingleIO, self).setUp()
self.x = np.asarray([[1.], [2.], [3.], [4.]])
self.y = np.asarray([[2.], [4.], [6.], [8.]])
self.sample_weight = np.asarray([2., 3., 4., 5.])
self.class_weight = {2: 2, 4: 3, 6: 4, 8: 5}
# y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]
# Metric:
# Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) = 30,
# Count = 2 + 2
# Result = 7.5
# Weighted metric:
# Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +
# ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
# = 130
# Count = (2 + 3) + (4 + 5)
# Result = 9.2857141
# Total loss with weights:
# Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +
# ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
# = 130,
# Count = 2 + 2
# Result = 32.5
# Total loss without weights:
# Total = ((3 - 2)^2 + (6 - 4)^2) +
# ((9 - 6)^2 + (12 - 8)^2)
# = 30,
# Count = 2 + 2
# Result = 7.5
wmse = 'mean_squared_error_2'
if not tf2.enabled():
wmse = 'weighted_' + wmse
self.expected_fit_result_with_weights = {
'mean_squared_error': [7.5, 7.5],
wmse: [9.286, 9.286],
'loss': [32.5, 32.5]
}
self.expected_fit_result = {
'mean_squared_error': [7.5, 7.5],
wmse: [7.5, 7.5],
'loss': [7.5, 7.5]
}
# In the order: 'loss', 'mean_squared_error', 'mean_squared_error_2'
self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]
self.expected_batch_result = [7.5, 7.5, 7.5]
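  # Illustrative sketch (hypothetical helper, not in the original file): the
  # flat result lists above follow the ordering of `model.metrics_names`, so
  # evaluation results can be read back by name.
  def _results_by_name(self, model, results):
    return dict(zip(model.metrics_names, results))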
def test_fit(self):
model = self._get_model()
history = model.fit(
self.x,
self.y,
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_fit_with_sample_weight(self):
model = self._get_model()
history = model.fit(
self.x,
self.y,
sample_weight=self.sample_weight,
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result_with_weights.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_fit_with_class_weight(self):
model = self._get_model()
history = model.fit(
self.x,
self.y,
class_weight=self.class_weight,
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result_with_weights.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_eval(self):
model = self._get_model()
eval_result = model.evaluate(self.x, self.y, batch_size=2)
self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
def test_eval_with_sample_weight(self):
model = self._get_model()
eval_result = model.evaluate(
self.x, self.y, batch_size=2, sample_weight=self.sample_weight)
self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
1e-3)
    # Verify that the metric value is the same for arbitrary weights and
    # batch sizes.
x = np.random.random((50, 1))
y = np.random.random((50, 1))
w = np.random.random((50,))
mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]
mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]
self.assertAllClose(mse1, mse2, 1e-3)
def test_train_on_batch(self):
model = self._get_model()
result = model.train_on_batch(self.x, self.y)
self.assertAllClose(result, self.expected_batch_result, 1e-3)
def test_train_on_batch_with_sample_weight(self):
model = self._get_model()
result = model.train_on_batch(
self.x, self.y, sample_weight=self.sample_weight)
self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
def test_train_on_batch_with_class_weight(self):
model = self._get_model()
result = model.train_on_batch(
self.x, self.y, class_weight=self.class_weight)
self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
def test_test_on_batch(self):
model = self._get_model()
result = model.test_on_batch(self.x, self.y)
self.assertAllClose(result, self.expected_batch_result, 1e-3)
def test_test_on_batch_with_sample_weight(self):
model = self._get_model()
result = model.test_on_batch(
self.x, self.y, sample_weight=self.sample_weight)
self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)
def test_fit_generator(self):
model = self._get_model()
history = model.fit_generator(
self._custom_generator(), steps_per_epoch=2, epochs=2)
for key, value in self.expected_fit_result.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_fit_generator_with_sample_weight(self):
model = self._get_model()
history = model.fit_generator(
self._custom_generator(sample_weight=self.sample_weight),
steps_per_epoch=2,
epochs=2)
for key, value in self.expected_fit_result_with_weights.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_fit_generator_with_class_weight(self):
model = self._get_model()
history = model.fit_generator(
self._custom_generator(),
steps_per_epoch=2,
epochs=2,
class_weight=self.class_weight)
for key, value in self.expected_fit_result_with_weights.items():
self.assertAllClose(history.history[key], value, 1e-3)
def test_eval_generator(self):
model = self._get_model()
eval_result = model.evaluate_generator(self._custom_generator(), steps=2)
self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)
def test_eval_generator_with_sample_weight(self):
model = self._get_model()
eval_result = model.evaluate_generator(
self._custom_generator(sample_weight=self.sample_weight), steps=2)
self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
1e-3)
@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes
@parameterized.parameters([
loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,
loss_reduction.ReductionV2.AUTO,
loss_reduction.ReductionV2.SUM
])
class TestOutputLossMetrics(keras_parameterized.TestCase):
def _get_compiled_multi_io_model(self, loss):
model = get_multi_io_model()
model.compile(
optimizer='rmsprop',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def setUp(self):
super(TestOutputLossMetrics, self).setUp()
self.x = np.asarray([[1.], [2.], [3.], [4.]])
self.y1 = np.asarray([[2.], [4.], [6.], [8.]])
self.y2 = np.asarray([[1.], [2.], [3.], [4.]])
self.sample_weight_1 = np.asarray([2., 3., 4., 5.])
self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])
# y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]
# Loss `output_1`:
# Per-sample weighted losses
    # Batch 1 = [(3 - 2)^2 * 2, (6 - 4)^2 * 3] = [2, 12]
    # Batch 2 = [(9 - 6)^2 * 4, (12 - 8)^2 * 5] = [36, 80]
# Result (reduction=SUM) = ((2 + 12) + (36 + 80))/2 = 65
# Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 130 / 4 = 32.5
# Loss `output_2`:
# Per-sample weighted losses
    # Batch 1 = [(3 - 1)^2 * 3.5, (6 - 2)^2 * 2.5] = [14, 40]
    # Batch 2 = [(9 - 3)^2 * 1.5, (12 - 4)^2 * 0.5] = [54, 32]
# Result (reduction=SUM) = ((14 + 40) + (54 + 32))/2 = 70
# Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 140 / 4 = 35
    # When reduction is 'NONE', the loss value passed to the optimizer is a
    # per-sample vector, but what is reported is a scalar: the average of all
    # the values across all the batch vectors.
# Total loss = Output_loss_1 + Output_loss_2
sum_over_batch_size_fit_result = {
'loss': [67.5, 67.5],
'output_1_loss': [32.5, 32.5],
'output_2_loss': [35, 35],
}
self.expected_fit_result = {
loss_reduction.ReductionV2.NONE:
sum_over_batch_size_fit_result,
loss_reduction.ReductionV2.SUM: {
'loss': [135, 135],
'output_1_loss': [65, 65],
'output_2_loss': [70, 70],
},
loss_reduction.ReductionV2.AUTO:
sum_over_batch_size_fit_result,
loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE:
sum_over_batch_size_fit_result,
}
    # In the order: 'loss', 'output_1_loss', 'output_2_loss'.
self.expected_batch_result = {
loss_reduction.ReductionV2.NONE: [67.5, 32.5, 35],
loss_reduction.ReductionV2.SUM: [135, 65, 70],
loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],
loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35],
}
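  # Illustrative sketch (hypothetical helper, not part of the original suite;
  # the stated values assume eager execution): the two main reductions applied
  # directly to output_1's first batch with the Keras loss object.
  def _reduction_demo(self):
    y_true = np.asarray([[2.], [4.]])
    y_pred = np.asarray([[3.], [6.]])
    w = np.asarray([2., 3.])
    mse_sum = losses.MeanSquaredError(reduction=loss_reduction.ReductionV2.SUM)
    mse_avg = losses.MeanSquaredError(
        reduction=loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE)
    # SUM keeps the weighted per-sample total (2 + 12 = 14);
    # SUM_OVER_BATCH_SIZE divides it by the batch size (14 / 2 = 7).
    return (mse_sum(y_true, y_pred, sample_weight=w),
            mse_avg(y_true, y_pred, sample_weight=w))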
def test_fit(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
history = model.fit([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
},
batch_size=2,
epochs=2,
shuffle=False)
for key, value in self.expected_fit_result[reduction].items():
self.assertAllClose(history.history[key], value)
def test_eval(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
batch_size=2,
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
self.assertAllClose(eval_result, self.expected_batch_result[reduction])
def test_train_on_batch(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
expected_values = self.expected_batch_result[reduction]
if reduction == loss_reduction.ReductionV2.SUM:
# We are taking all the data as one batch, so undo the averaging here.
expected_values = [x * 2 for x in self.expected_batch_result[reduction]]
self.assertAllClose(result, expected_values)
def test_test_on_batch(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],
sample_weight={
'output_1': self.sample_weight_1,
'output_2': self.sample_weight_2,
})
expected_values = self.expected_batch_result[reduction]
if reduction == loss_reduction.ReductionV2.SUM:
# We are taking all the data as one batch, so undo the averaging here.
expected_values = [x * 2 for x in self.expected_batch_result[reduction]]
self.assertAllClose(result, expected_values)
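  # Note: the factor of 2 in the two *_on_batch tests above comes from
  # batching. With reduction=SUM, fitting in two batches reports the average
  # of the per-batch sums (e.g. (14 + 116) / 2 = 65 for output_1), while
  # *_on_batch over all four samples yields the raw sum 130 = 2 * 65.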
def test_fit_generator(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
history = model.fit_generator(
custom_generator_multi_io(
sample_weights=[self.sample_weight_1, self.sample_weight_2]),
steps_per_epoch=2,
epochs=2)
for key, value in self.expected_fit_result[reduction].items():
self.assertAllClose(history.history[key], value)
def test_eval_generator(self, reduction):
model = self._get_compiled_multi_io_model(
loss=losses.MeanSquaredError(reduction=reduction))
eval_result = model.evaluate_generator(
custom_generator_multi_io(
sample_weights=[self.sample_weight_1, self.sample_weight_2]),
steps=2)
self.assertAllClose(eval_result, self.expected_batch_result[reduction])
if __name__ == '__main__':
test.main()
|
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests metrics correctness using Keras model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras import losses\nfrom tensorflow.python.keras import metrics\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.ops.losses import loss_reduction\nfrom tensorflow.python.platform import test\n\n\ndef get_multi_io_model():\n inp_1 = layers.Input(shape=(1,), name='input_1')\n inp_2 = layers.Input(shape=(1,), name='input_2')\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out_1 = layers.Dense(\n 1, kernel_initializer='ones', name='output_1', trainable=False)\n out_2 = layers.Dense(\n 1, kernel_initializer='ones', name='output_2', trainable=False)\n\n branch_a = [inp_1, x, out_1]\n branch_b = [inp_2, x, out_2]\n return testing_utils.get_multi_io_model(branch_a, branch_b)\n\n\ndef custom_generator_multi_io(sample_weights=None):\n batch_size = 2\n num_samples = 4\n inputs = np.asarray([[1.], [2.], [3.], [4.]])\n targets_1 = np.asarray([[2.], [4.], [6.], [8.]])\n targets_2 = np.asarray([[1.], [2.], [3.], [4.]])\n if sample_weights:\n assert len(sample_weights) == 2\n w1 = sample_weights[0]\n w2 = sample_weights[1]\n else:\n w1 = None\n w2 = None\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n x = [inputs[start:end], inputs[start:end]]\n y = [targets_1[start:end], targets_2[start:end]]\n if sample_weights:\n w = [\n None if w1 is None else w1[start:end],\n None if w2 is None else w2[start:end]\n ]\n else:\n w = None\n yield x, y, w\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(\n optimizer='rmsprop',\n loss='mse',\n metrics=[metrics.MeanSquaredError(name='mean_squared_error')],\n weighted_metrics=[\n metrics.MeanSquaredError(name='mean_squared_error_2')\n ],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.], [2.], [3.], [4.]])\n self.y1 = np.asarray([[2.], [4.], [6.], [8.]])\n self.y2 = np.asarray([[1.], [2.], [3.], [4.]])\n self.sample_weight_1 = np.asarray([2., 3., 4., 5.])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {2: 2, 4: 3, 6: 4, 8: 
5}\n self.class_weight_2 = {1: 3.5, 2: 2.5, 3: 1.5, 4: 0.5}\n\n # y_true_1 = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]\n # y_true_2 = [[1.], [2.], [3.], [4.]], y_pred = [[3.], [6.], [9.], [12.]]\n\n # Weighted metric `output_1`:\n # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +\n # ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)\n # = 130\n # Count = (2 + 3) + (4 + 5)\n # Result = 9.2857141\n\n # Weighted metric `output_2`:\n # Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) +\n # ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5)\n # = 140\n # Count = (3.5 + 2.5) + (1.5 + 0.5)\n # Result = 17.5\n\n # Loss `output_1` with weights:\n # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +\n # ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)\n # = 130\n # Count = 2 + 2\n # Result = 32.5\n\n # Loss `output_1` without weights/Metric `output_1`:\n # Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) = 30\n # Count = 2 + 2\n # Result = 7.5\n\n # Loss `output_2` with weights:\n # Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) +\n # ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5)\n # = 140\n # Count = 2 + 2\n # Result = 35\n\n # Loss `output_2` without weights/Metric `output_2`:\n # Total = ((3 - 1)^2 + (6 - 2)^2) + ((9 - 3)^2 + (12 - 4)^2) = 120\n # Count = 2 + 2\n # Result = 30\n\n # Total loss with weights = 32.5 + 35 = 67.5\n # Total loss without weights = 7.5 + 30 = 37.5\n\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30],\n 'output_1_' + self.wmse: [9.286, 9.286],\n 'output_2_' + self.wmse: [17.5, 17.5],\n 'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5],\n 'output_2_loss': [35, 35],\n }\n\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30],\n 'output_1_' + self.wmse: [7.5, 7.5],\n 'output_2_' + self.wmse: [17.5, 17.5],\n 'loss': [42.5, 42.5],\n 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35],\n }\n\n self.expected_fit_result = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30],\n 'output_1_' + self.wmse: [7.5, 7.5],\n 'output_2_' + self.wmse: [30, 30],\n 'loss': [37.5, 37.5],\n 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30],\n }\n\n # In the order: 'loss', 'output_1_loss', 'output_2_loss',\n # 'output_1_mean_squared_error', 'output_1_mean_squared_error_2',\n # 'output_2_mean_squared_error', 'output_2_mean_squared_error_2'\n self.expected_batch_result_with_weights = [\n 67.5, 32.5, 35, 7.5, 9.286, 30, 17.5\n ]\n self.expected_batch_result_with_weights_output_2 = [\n 42.5, 7.5, 35, 7.5, 7.5, 30, 17.5\n ]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2,\n epochs=2,\n shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={\n 'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2,\n },\n batch_size=2,\n epochs=2,\n shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n # Set weights for one output (use batch size).\n history = 
model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2},\n batch_size=2,\n epochs=2,\n shuffle=False)\n\n for key, value in self.expected_fit_result_with_weights_output_2.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={\n 'output_1': self.class_weight_1,\n 'output_2': self.class_weight_2,\n },\n batch_size=2,\n epochs=2,\n shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n # Set weights for one output.\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2},\n batch_size=2,\n epochs=2,\n shuffle=False)\n\n for key, value in self.expected_fit_result_with_weights_output_2.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2,\n sample_weight={\n 'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2,\n })\n self.assertAllClose(eval_result, self.expected_batch_result_with_weights,\n 1e-3)\n\n # Set weights for one output.\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2,\n sample_weight={\n 'output_2': self.sample_weight_2,\n })\n self.assertAllClose(eval_result,\n self.expected_batch_result_with_weights_output_2, 1e-3)\n\n # Verify that metric value is same with arbitrary weights and batch size.\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w], batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 1e-3)\n\n def test_train_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 1e-3)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={\n 'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2,\n })\n self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)\n\n # Set weights for one output.\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={\n 'output_2': self.sample_weight_2,\n })\n self.assertAllClose(result,\n self.expected_batch_result_with_weights_output_2, 1e-3)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={\n 'output_1': self.class_weight_1,\n 'output_2': self.class_weight_2,\n })\n self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)\n\n # Set weights for one output.\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={\n 
'output_2': self.class_weight_2,\n })\n self.assertAllClose(result,\n self.expected_batch_result_with_weights_output_2, 1e-3)\n\n def test_test_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 1e-3)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={\n 'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2,\n })\n self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)\n\n # Set weights for one output.\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={\n 'output_2': self.sample_weight_2,\n })\n self.assertAllClose(result,\n self.expected_batch_result_with_weights_output_2, 1e-3)\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(\n custom_generator_multi_io(), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(\n custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n # Set weights for one output.\n history = model.fit_generator(\n custom_generator_multi_io(sample_weights=[None, self.sample_weight_2]),\n steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(\n custom_generator_multi_io(),\n class_weight={\n 'output_1': self.class_weight_1,\n 'output_2': self.class_weight_2,\n },\n steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n # Set weights for one output.\n history = model.fit_generator(\n custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2},\n steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(), steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(\n custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result_with_weights,\n 1e-3)\n\n # Set weights for one output.\n eval_result = model.evaluate_generator(\n custom_generator_multi_io(sample_weights=[None, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result,\n self.expected_batch_result_with_weights_output_2, 1e-3)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass 
TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(\n 1, kernel_initializer='ones', name='output', trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(\n optimizer='rmsprop',\n loss='mse',\n metrics=[metrics.MeanSquaredError(name='mean_squared_error')],\n weighted_metrics=[\n metrics.MeanSquaredError(name='mean_squared_error_2')\n ],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.], [2.], [3.], [4.]])\n y = np.asarray([[2.], [4.], [6.], [8.]])\n w = sample_weight\n i = 0\n\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start:end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.], [2.], [3.], [4.]])\n self.y = np.asarray([[2.], [4.], [6.], [8.]])\n self.sample_weight = np.asarray([2., 3., 4., 5.])\n self.class_weight = {2: 2, 4: 3, 6: 4, 8: 5}\n\n # y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]\n\n # Metric:\n # Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) = 30,\n # Count = 2 + 2\n # Result = 7.5\n\n # Weighted metric:\n # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +\n # ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)\n # = 130\n # Count = (2 + 3) + (4 + 5)\n # Result = 9.2857141\n\n # Total loss with weights:\n # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) +\n # ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)\n # = 130,\n # Count = 2 + 2\n # Result = 32.5\n\n # Total loss without weights:\n # Total = ((3 - 2)^2 + (6 - 4)^2) +\n # ((9 - 6)^2 + (12 - 8)^2)\n # = 30,\n # Count = 2 + 2\n # Result = 7.5\n\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n\n self.expected_fit_result_with_weights = {\n 'mean_squared_error': [7.5, 7.5],\n wmse: [9.286, 9.286],\n 'loss': [32.5, 32.5]\n }\n\n self.expected_fit_result = {\n 'mean_squared_error': [7.5, 7.5],\n wmse: [7.5, 7.5],\n 'loss': [7.5, 7.5]\n }\n\n # In the order: 'loss', 'mean_squared_error', 'mean_squared_error_2'\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n\n history = model.fit(\n self.x,\n self.y,\n batch_size=2,\n epochs=2,\n shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(\n self.x,\n self.y,\n sample_weight=self.sample_weight,\n batch_size=2,\n epochs=2,\n shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(\n self.x,\n self.y,\n class_weight=self.class_weight,\n batch_size=2,\n epochs=2,\n shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, 
self.expected_batch_result, 1e-3)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(\n self.x, self.y, batch_size=2, sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.expected_batch_result_with_weights,\n 1e-3)\n\n # Verify that metric value is same with arbitrary weights and batch size.\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 1e-3)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 1e-3)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(\n self.x, self.y, sample_weight=self.sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(\n self.x, self.y, class_weight=self.class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 1e-3)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(\n self.x, self.y, sample_weight=self.sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(\n self._custom_generator(), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(\n self._custom_generator(sample_weight=self.sample_weight),\n steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(\n self._custom_generator(),\n steps_per_epoch=2,\n epochs=2,\n class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 1e-3)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(), steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(\n self._custom_generator(sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result_with_weights,\n 1e-3)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO,\n loss_reduction.ReductionV2.SUM\n])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(\n 
optimizer='rmsprop',\n loss=loss,\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.], [2.], [3.], [4.]])\n self.y1 = np.asarray([[2.], [4.], [6.], [8.]])\n self.y2 = np.asarray([[1.], [2.], [3.], [4.]])\n self.sample_weight_1 = np.asarray([2., 3., 4., 5.])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n\n # y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]\n\n # Loss `output_1`:\n # Per-sample weighted losses\n # Batch 1 = [(3 - 2)^2 * 2, (6 - 4)^2 * 3)] = [2, 12]\n # Batch 2 = [((9 - 6)^2 * 4, (12 - 8)^2 * 5)] = [36, 80]\n\n # Result (reduction=SUM) = ((2 + 12) + (36 + 80))/2 = 65\n # Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 130 / 4 = 32.5\n\n # Loss `output_2`:\n # Per-sample weighted losses\n # Batch 1 = [(3 - 1)^2 * 3.5, (6 - 2)^2 * 2.5)] = [14, 40]\n # Batch 2 = [(9 - 3)^2 * 1.5, (12 - 4)^2 * 0.5)] = [54, 32]\n\n # Result (reduction=SUM) = ((14 + 40) + (54 + 32))/2 = 70\n # Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 140 / 4 = 35\n\n # When reduction is 'NONE' loss value that is passed to the optimizer will\n # be vector loss but what is reported is a scalar, which is an average of\n # all the values in all the batch vectors.\n\n # Total loss = Output_loss_1 + Output_loss_2\n\n sum_over_batch_size_fit_result = {\n 'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5],\n 'output_2_loss': [35, 35],\n }\n\n self.expected_fit_result = {\n loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result,\n loss_reduction.ReductionV2.SUM: {\n 'loss': [135, 135],\n 'output_1_loss': [65, 65],\n 'output_2_loss': [70, 70],\n },\n loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result,\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE:\n sum_over_batch_size_fit_result,\n }\n\n # In the order: 'loss', 'output_1_loss', 'output_2_loss',\n self.expected_batch_result = {\n loss_reduction.ReductionV2.NONE: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35],\n }\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(\n loss=losses.MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={\n 'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2,\n },\n batch_size=2,\n epochs=2,\n shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(\n loss=losses.MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2,\n sample_weight={\n 'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2,\n })\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(\n loss=losses.MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={\n 'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2,\n })\n\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n # We are taking all the data as one 
batch, so undo the averaging here.\n expected_values = [x * 2 for x in self.expected_batch_result[reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(\n loss=losses.MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={\n 'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2,\n })\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n # We are taking all the data as one batch, so undo the averaging here.\n expected_values = [x * 2 for x in self.expected_batch_result[reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(\n loss=losses.MeanSquaredError(reduction=reduction))\n history = model.fit_generator(\n custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(\n loss=losses.MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(\n custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\nif __name__ == '__main__':\n test.main()\n",
"<docstring token>\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom absl.testing import parameterized\nimport numpy as np\nfrom tensorflow.python import tf2\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras import losses\nfrom tensorflow.python.keras import metrics\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.ops.losses import loss_reduction\nfrom tensorflow.python.platform import test\n\n\ndef get_multi_io_model():\n inp_1 = layers.Input(shape=(1,), name='input_1')\n inp_2 = layers.Input(shape=(1,), name='input_2')\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out_1 = layers.Dense(1, kernel_initializer='ones', name='output_1',\n trainable=False)\n out_2 = layers.Dense(1, kernel_initializer='ones', name='output_2',\n trainable=False)\n branch_a = [inp_1, x, out_1]\n branch_b = [inp_2, x, out_2]\n return testing_utils.get_multi_io_model(branch_a, branch_b)\n\n\ndef custom_generator_multi_io(sample_weights=None):\n batch_size = 2\n num_samples = 4\n inputs = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n targets_1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n targets_2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n if sample_weights:\n assert len(sample_weights) == 2\n w1 = sample_weights[0]\n w2 = sample_weights[1]\n else:\n w1 = None\n w2 = None\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n x = [inputs[start:end], inputs[start:end]]\n y = [targets_1[start:end], targets_2[start:end]]\n if sample_weights:\n w = [None if w1 is None else w1[start:end], None if w2 is None else\n w2[start:end]]\n else:\n w = None\n yield x, y, w\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 
'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2])\n 
self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_test_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def 
test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n 
eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n 
model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], 
value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\nif __name__ == '__main__':\n test.main()\n",
"<docstring token>\n<import token>\n\n\ndef get_multi_io_model():\n inp_1 = layers.Input(shape=(1,), name='input_1')\n inp_2 = layers.Input(shape=(1,), name='input_2')\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out_1 = layers.Dense(1, kernel_initializer='ones', name='output_1',\n trainable=False)\n out_2 = layers.Dense(1, kernel_initializer='ones', name='output_2',\n trainable=False)\n branch_a = [inp_1, x, out_1]\n branch_b = [inp_2, x, out_2]\n return testing_utils.get_multi_io_model(branch_a, branch_b)\n\n\ndef custom_generator_multi_io(sample_weights=None):\n batch_size = 2\n num_samples = 4\n inputs = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n targets_1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n targets_2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n if sample_weights:\n assert len(sample_weights) == 2\n w1 = sample_weights[0]\n w2 = sample_weights[1]\n else:\n w1 = None\n w2 = None\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n x = [inputs[start:end], inputs[start:end]]\n y = [targets_1[start:end], targets_2[start:end]]\n if sample_weights:\n w = [None if w1 is None else w1[start:end], None if w2 is None else\n w2[start:end]]\n else:\n w = None\n yield x, y, w\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = 
[37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n 
expected_batch_result_with_weights_output_2, 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_test_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = 
model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, 
sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 
67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\nif __name__ == '__main__':\n test.main()\n",
"<docstring token>\n<import token>\n\n\ndef get_multi_io_model():\n inp_1 = layers.Input(shape=(1,), name='input_1')\n inp_2 = layers.Input(shape=(1,), name='input_2')\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out_1 = layers.Dense(1, kernel_initializer='ones', name='output_1',\n trainable=False)\n out_2 = layers.Dense(1, kernel_initializer='ones', name='output_2',\n trainable=False)\n branch_a = [inp_1, x, out_1]\n branch_b = [inp_2, x, out_2]\n return testing_utils.get_multi_io_model(branch_a, branch_b)\n\n\ndef custom_generator_multi_io(sample_weights=None):\n batch_size = 2\n num_samples = 4\n inputs = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n targets_1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n targets_2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n if sample_weights:\n assert len(sample_weights) == 2\n w1 = sample_weights[0]\n w2 = sample_weights[1]\n else:\n w1 = None\n w2 = None\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n x = [inputs[start:end], inputs[start:end]]\n y = [targets_1[start:end], targets_2[start:end]]\n if sample_weights:\n w = [None if w1 is None else w1[start:end], None if w2 is None else\n w2[start:end]]\n else:\n w = None\n yield x, y, w\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = 
[37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n 
expected_batch_result_with_weights_output_2, 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_test_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = 
model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, 
sample_weight=w, batch_size=10)[1]\n        self.assertAllClose(mse1, mse2, 0.001)\n\n    def test_train_on_batch(self):\n        model = self._get_model()\n        result = model.train_on_batch(self.x, self.y)\n        self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n    def test_train_on_batch_with_sample_weight(self):\n        model = self._get_model()\n        result = model.train_on_batch(self.x, self.y, sample_weight=self.\n            sample_weight)\n        self.assertAllClose(result, self.expected_batch_result_with_weights,\n            0.001)\n\n    def test_train_on_batch_with_class_weight(self):\n        model = self._get_model()\n        result = model.train_on_batch(self.x, self.y, class_weight=self.\n            class_weight)\n        self.assertAllClose(result, self.expected_batch_result_with_weights,\n            0.001)\n\n    def test_test_on_batch(self):\n        model = self._get_model()\n        result = model.test_on_batch(self.x, self.y)\n        self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n    def test_test_on_batch_with_sample_weight(self):\n        model = self._get_model()\n        result = model.test_on_batch(self.x, self.y, sample_weight=self.\n            sample_weight)\n        self.assertAllClose(result, self.expected_batch_result_with_weights,\n            0.001)\n\n    def test_fit_generator(self):\n        model = self._get_model()\n        history = model.fit_generator(self._custom_generator(),\n            steps_per_epoch=2, epochs=2)\n        for key, value in self.expected_fit_result.items():\n            self.assertAllClose(history.history[key], value, 0.001)\n\n    def test_fit_generator_with_sample_weight(self):\n        model = self._get_model()\n        history = model.fit_generator(self._custom_generator(sample_weight=\n            self.sample_weight), steps_per_epoch=2, epochs=2)\n        for key, value in self.expected_fit_result_with_weights.items():\n            self.assertAllClose(history.history[key], value, 0.001)\n\n    def test_fit_generator_with_class_weight(self):\n        model = self._get_model()\n        history = model.fit_generator(self._custom_generator(),\n            steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n        for key, value in self.expected_fit_result_with_weights.items():\n            self.assertAllClose(history.history[key], value, 0.001)\n\n    def test_eval_generator(self):\n        model = self._get_model()\n        eval_result = model.evaluate_generator(self._custom_generator(),\n            steps=2)\n        self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n    def test_eval_generator_with_sample_weight(self):\n        model = self._get_model()\n        eval_result = model.evaluate_generator(self._custom_generator(\n            sample_weight=self.sample_weight), steps=2)\n        self.assertAllClose(eval_result, self.\n            expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\n@parameterized.parameters([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n    loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n    def _get_compiled_multi_io_model(self, loss):\n        model = get_multi_io_model()\n        model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n            testing_utils.should_run_eagerly(),\n            experimental_run_tf_function=testing_utils.should_run_tf_function()\n            )\n        return model\n\n    def setUp(self):\n        super(TestOutputLossMetrics, self).setUp()\n        self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n        self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n        self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n        self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n        self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n        sum_over_batch_size_fit_result = {'loss': [67.5, 
67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
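The expected values hard-coded throughout these steps are not arbitrary: both Dense layers are frozen at all-ones kernels, so the network always predicts 3 * x and every expectation reduces to weighted-mean arithmetic over squared errors. A minimal NumPy sketch (illustrative only, not part of any dataset row) reproduces the single-output numbers 7.5, 9.286 and 32.5:

import numpy as np

# Frozen Dense(3, ones) followed by Dense(1, ones) maps x to 3 * x.
x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([2.0, 4.0, 6.0, 8.0])
w = np.array([2.0, 3.0, 4.0, 5.0])   # sample weights

sq_err = (3 * x - y) ** 2            # [1, 4, 9, 16]
print(sq_err.mean())                 # 7.5 -> 'mean_squared_error'
print((w * sq_err).sum() / w.sum())  # 130 / 14 = 9.2857 -> weighted metric 'mean_squared_error_2'
print((w * sq_err).sum() / x.size)   # 130 / 4 = 32.5 -> sample-weighted 'loss'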
"<docstring token>\n<import token>\n\n\ndef get_multi_io_model():\n inp_1 = layers.Input(shape=(1,), name='input_1')\n inp_2 = layers.Input(shape=(1,), name='input_2')\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out_1 = layers.Dense(1, kernel_initializer='ones', name='output_1',\n trainable=False)\n out_2 = layers.Dense(1, kernel_initializer='ones', name='output_2',\n trainable=False)\n branch_a = [inp_1, x, out_1]\n branch_b = [inp_2, x, out_2]\n return testing_utils.get_multi_io_model(branch_a, branch_b)\n\n\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = 
model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_test_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], 
[self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n 
MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, 
self.expected_batch_result_with_weights,\n            0.001)\n\n    def test_test_on_batch(self):\n        model = self._get_model()\n        result = model.test_on_batch(self.x, self.y)\n        self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n    def test_test_on_batch_with_sample_weight(self):\n        model = self._get_model()\n        result = model.test_on_batch(self.x, self.y, sample_weight=self.\n            sample_weight)\n        self.assertAllClose(result, self.expected_batch_result_with_weights,\n            0.001)\n\n    def test_fit_generator(self):\n        model = self._get_model()\n        history = model.fit_generator(self._custom_generator(),\n            steps_per_epoch=2, epochs=2)\n        for key, value in self.expected_fit_result.items():\n            self.assertAllClose(history.history[key], value, 0.001)\n\n    def test_fit_generator_with_sample_weight(self):\n        model = self._get_model()\n        history = model.fit_generator(self._custom_generator(sample_weight=\n            self.sample_weight), steps_per_epoch=2, epochs=2)\n        for key, value in self.expected_fit_result_with_weights.items():\n            self.assertAllClose(history.history[key], value, 0.001)\n\n    def test_fit_generator_with_class_weight(self):\n        model = self._get_model()\n        history = model.fit_generator(self._custom_generator(),\n            steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n        for key, value in self.expected_fit_result_with_weights.items():\n            self.assertAllClose(history.history[key], value, 0.001)\n\n    def test_eval_generator(self):\n        model = self._get_model()\n        eval_result = model.evaluate_generator(self._custom_generator(),\n            steps=2)\n        self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n    def test_eval_generator_with_sample_weight(self):\n        model = self._get_model()\n        eval_result = model.evaluate_generator(self._custom_generator(\n            sample_weight=self.sample_weight), steps=2)\n        self.assertAllClose(eval_result, self.\n            expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\n@parameterized.parameters([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n    loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n    def _get_compiled_multi_io_model(self, loss):\n        model = get_multi_io_model()\n        model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n            testing_utils.should_run_eagerly(),\n            experimental_run_tf_function=testing_utils.should_run_tf_function()\n            )\n        return model\n\n    def setUp(self):\n        super(TestOutputLossMetrics, self).setUp()\n        self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n        self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n        self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n        self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n        self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n        sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n            'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n        self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n            sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n            {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n            [70, 70]}, loss_reduction.ReductionV2.AUTO:\n            sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n            SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n        self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n            67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n            loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n            loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n    def 
test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
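The multi-IO expectations in the step above decompose the same way, one output at a time. Assuming the shared frozen branch still predicts 3 * x, this sketch (variable names are illustrative) checks the output_2 values 30, 17.5 and 35; output_1 contributes 32.5 as before, giving the combined 'loss' of 67.5:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y2 = np.array([1.0, 2.0, 3.0, 4.0])
w2 = np.array([3.5, 2.5, 1.5, 0.5])  # sample_weight_2

se2 = (3 * x - y2) ** 2              # [4, 16, 36, 64]
print(se2.mean())                    # 30.0 -> 'output_2_mean_squared_error'
print((w2 * se2).sum() / w2.sum())   # 140 / 8 = 17.5 -> weighted metric
print((w2 * se2).sum() / x.size)     # 140 / 4 = 35.0 -> 'output_2_loss'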
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': 
self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_test_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.test_on_batch([self.x, self.x], [self.y1, 
self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % 
num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = 
self._get_model()\n        history = model.fit_generator(self._custom_generator(),\n            steps_per_epoch=2, epochs=2)\n        for key, value in self.expected_fit_result.items():\n            self.assertAllClose(history.history[key], value, 0.001)\n\n    def test_fit_generator_with_sample_weight(self):\n        model = self._get_model()\n        history = model.fit_generator(self._custom_generator(sample_weight=\n            self.sample_weight), steps_per_epoch=2, epochs=2)\n        for key, value in self.expected_fit_result_with_weights.items():\n            self.assertAllClose(history.history[key], value, 0.001)\n\n    def test_fit_generator_with_class_weight(self):\n        model = self._get_model()\n        history = model.fit_generator(self._custom_generator(),\n            steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n        for key, value in self.expected_fit_result_with_weights.items():\n            self.assertAllClose(history.history[key], value, 0.001)\n\n    def test_eval_generator(self):\n        model = self._get_model()\n        eval_result = model.evaluate_generator(self._custom_generator(),\n            steps=2)\n        self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n    def test_eval_generator_with_sample_weight(self):\n        model = self._get_model()\n        eval_result = model.evaluate_generator(self._custom_generator(\n            sample_weight=self.sample_weight), steps=2)\n        self.assertAllClose(eval_result, self.\n            expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\n@parameterized.parameters([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n    loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n    def _get_compiled_multi_io_model(self, loss):\n        model = get_multi_io_model()\n        model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n            testing_utils.should_run_eagerly(),\n            experimental_run_tf_function=testing_utils.should_run_tf_function()\n            )\n        return model\n\n    def setUp(self):\n        super(TestOutputLossMetrics, self).setUp()\n        self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n        self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n        self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n        self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n        self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n        sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n            'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n        self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n            sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n            {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n            [70, 70]}, loss_reduction.ReductionV2.AUTO:\n            sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n            SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n        self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n            67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n            loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n            loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n    def test_fit(self, reduction):\n        model = self._get_compiled_multi_io_model(loss=losses.\n            MeanSquaredError(reduction=reduction))\n        history = model.fit([self.x, self.x], [self.y1, self.y2],\n            sample_weight={'output_1': self.sample_weight_1, 'output_2':\n            self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n        for key, value in self.expected_fit_result[reduction].items():\n            self.assertAllClose(history.history[key], value)\n\n    def test_eval(self, reduction):\n        model = self._get_compiled_multi_io_model(loss=losses.\n            
MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
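The reduction table in TestOutputLossMetrics, and the doubling applied for ReductionV2.SUM in the *_on_batch tests, are consistent with one reading (inferred from the expected values, not stated in the row itself): under SUM, fit() records the per-batch weighted loss sum averaged over the two batches of size 2, so a single four-sample batch produces exactly twice that number. Checking output_1:

import numpy as np

w1 = np.array([2.0, 3.0, 4.0, 5.0])    # sample_weight_1
se1 = np.array([1.0, 4.0, 9.0, 16.0])  # squared errors, y_pred = 3 * x
per_sample = w1 * se1                  # [2, 12, 36, 80]

batch_sums = per_sample.reshape(2, 2).sum(axis=1)  # [14, 116] at batch_size=2
print(batch_sums.mean())               # 65.0 -> 'output_1_loss' under SUM
print(per_sample.sum())                # 130.0 -> one 4-sample batch = 2 * 65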
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': 
self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2])\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_fit_generator(self):\n model = 
self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n 
super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n 
self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': 
self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': 
self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 0.001)\n <function token>\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n 
self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n 
self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n 
self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = 
self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': 
self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 0.001)\n <function token>\n <function token>\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), 
steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = 
model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = 
self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = 
self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': 
self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n 
self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def 
test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 
0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, 
expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
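As an aside, the constants baked into these setUp fixtures can be re-derived by hand: with both Dense layers frozen at all-ones kernels (and Keras's default zero biases), the single-input model reduces to y_pred = 3 * x. A minimal numpy check of the single-IO expectations (7.5, 9.286, 32.5), offered as an illustrative sketch rather than part of the test file:

import numpy as np

x = np.array([1., 2., 3., 4.])
y = np.array([2., 4., 6., 8.])
w = np.array([2., 3., 4., 5.])

sq_err = (3 * x - y) ** 2               # per-sample squared errors: [1, 4, 9, 16]
print(sq_err.mean())                    # 7.5    -> 'mean_squared_error'
print((w * sq_err).sum() / w.sum())     # 9.2857 -> weighted metric, 9.286 in the fixture
print((w * sq_err).sum() / len(x))      # 32.5   -> 'loss' (sum over batch size)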
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': 
self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps_per_epoch=2,\n epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, 
self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = 
self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def 
setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = 
model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
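The multi-IO expectations follow the same arithmetic, assuming (as the fixture's numbers imply) that each branch of get_multi_io_model is the same frozen ones-initialized stack, so both outputs predict 3 * x. A hedged numpy sketch reproducing 7.5/30 (unweighted metrics), 17.5 (weighted output_2 metric), and 32.5/35/67.5 (losses):

import numpy as np

x = np.array([1., 2., 3., 4.])
y1, y2 = 2 * x, x                               # targets for output_1 / output_2
w1 = np.array([2., 3., 4., 5.])
w2 = np.array([3.5, 2.5, 1.5, 0.5])

e1, e2 = (3 * x - y1) ** 2, (3 * x - y2) ** 2   # per-sample squared errors
print(e1.mean(), e2.mean())                     # 7.5, 30.0  (unweighted MSE metrics)
print((w2 * e2).sum() / w2.sum())               # 17.5       (weighted output_2 metric)
loss1 = (w1 * e1).sum() / len(x)                # 32.5       (output_1_loss)
loss2 = (w2 * e2).sum() / len(x)                # 35.0       (output_2_loss)
print(loss1 + loss2)                            # 67.5       (total 'loss')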
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': 
self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=5)[3]\n mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],\n batch_size=10)[3]\n self.assertAllClose(mse1, mse2, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 
0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n 
self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, 
loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
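The TestOutputLossMetrics expectations hinge on how the loss reduction interacts with batch_size=2. A small sketch of output_1 under the fixture's weights shows where 32.5 (SUM_OVER_BATCH_SIZE / AUTO) and 65 (SUM) come from; it also explains the tests' expected_values = [x * 2 ...] branch, since train_on_batch sees all four samples as one batch (130 = 2 * 65):

import numpy as np

x = np.array([1., 2., 3., 4.])
w1 = np.array([2., 3., 4., 5.])
wse1 = w1 * (3 * x - 2 * x) ** 2      # weighted squared errors: [2, 12, 36, 80]

# SUM_OVER_BATCH_SIZE divides each batch's sum by the batch size; SUM does not.
batches = wse1.reshape(2, 2)          # two batches of two samples
print(batches.sum(axis=1) / 2)        # [7., 58.]   -> averaged over batches: 32.5
print(batches.sum(axis=1))            # [14., 116.] -> averaged: 65 (ReductionV2.SUM)
print(wse1.sum())                     # 130 = 2 * 65, the single-batch SUM case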
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': 
self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n 
batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def 
test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = 
self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
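The doubling applied to expected_values under Reduction.SUM in the entry above follows from batch arithmetic: fit/evaluate with batch_size=2 report the average of two per-batch sums, while train_on_batch/test_on_batch see all four samples in a single batch, so the reported sum is twice as large. A minimal numpy check, assuming each output branch maps x to 3*x (the ones-initialized Dense(3) -> Dense(1) stack used by the single-IO model; the multi-IO builder itself is elided as <function token> in this record):

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
w1 = np.array([2.0, 3.0, 4.0, 5.0])
w2 = np.array([3.5, 2.5, 1.5, 0.5])
se1 = (3 * x - 2 * x) ** 2          # output_1 squared errors: [1, 4, 9, 16]
se2 = (3 * x - x) ** 2              # output_2 squared errors: [4, 16, 36, 64]

# With Reduction.SUM and batch_size=2, fit/evaluate average the two
# per-batch sums; train_on_batch sums over all four samples at once.
per_batch_1 = [(w1 * se1)[:2].sum(), (w1 * se1)[2:].sum()]    # 14.0, 116.0
print(np.mean(per_batch_1))         # 65.0  -> 'output_1_loss' under SUM
print((w1 * se1).sum())             # 130.0 -> train_on_batch value (2 * 65)
print(np.mean([(w2 * se2)[:2].sum(), (w2 * se2)[2:].sum()]))  # 70.0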
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_1': self.class_weight_1, 'output_2': 
self\n .class_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n class_weight={'output_2': self.class_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n 
super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n 
self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': 
self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
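The constants in TestMetricsCorrectnessSingleIO.setUp can be reproduced by hand: the frozen ones-initialized Dense(3) -> Dense(1) stack maps x to 3*x, so with targets y = 2*x the residual is exactly x. The weighted loss is sum(w*se)/batch_size, while the 9.286 weighted metric is the weighted mean sum(w*se)/sum(w). A short numpy sketch of that derivation:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
w = np.array([2.0, 3.0, 4.0, 5.0])
se = (3 * x - 2 * x) ** 2           # squared errors: [1, 4, 9, 16]

print(se.mean())                    # 7.5    -> unweighted MSE and loss
print((w * se).sum() / len(se))     # 32.5   -> weighted loss
print((w * se).sum() / w.sum())     # 9.2857 -> 'mean_squared_error_2'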
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestMetricsCorrectnessMultiIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n self.class_weight_1 = {(2): 2, (4): 3, (6): 4, (8): 5}\n self.class_weight_2 = {(1): 3.5, (2): 2.5, (3): 1.5, (4): 0.5}\n self.wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n self.wmse = 'weighted_' + self.wmse\n self.expected_fit_result_with_weights = {'output_1_mean_squared_error':\n [7.5, 7.5], 'output_2_mean_squared_error': [30, 30], (\n 'output_1_' + self.wmse): [9.286, 9.286], ('output_2_' + self.\n wmse): [17.5, 17.5], 'loss': [67.5, 67.5], 'output_1_loss': [\n 32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result_with_weights_output_2 = {\n 'output_1_mean_squared_error': [7.5, 7.5],\n 'output_2_mean_squared_error': [30, 30], ('output_1_' + self.\n wmse): [7.5, 7.5], ('output_2_' + self.wmse): [17.5, 17.5],\n 'loss': [42.5, 42.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [35, 35]}\n self.expected_fit_result = {'output_1_mean_squared_error': [7.5, \n 7.5], 'output_2_mean_squared_error': [30, 30], ('output_1_' +\n self.wmse): [7.5, 7.5], ('output_2_' + self.wmse): [30, 30],\n 'loss': [37.5, 37.5], 'output_1_loss': [7.5, 7.5],\n 'output_2_loss': [30, 30]}\n self.expected_batch_result_with_weights = [67.5, 32.5, 35, 7.5, \n 9.286, 30, 17.5]\n self.expected_batch_result_with_weights_output_2 = [42.5, 7.5, 35, \n 7.5, 7.5, 30, 17.5]\n self.expected_batch_result = [37.5, 7.5, 30, 7.5, 7.5, 30, 30]\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator(self):\n model = 
self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n 
self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n 
self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, 
expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
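The index arithmetic in _custom_generator (batch_index = i * batch_size % num_samples) makes the generator cycle endlessly over the same two slices, which is why steps_per_epoch=2 walks the whole four-sample dataset exactly once per epoch. A quick illustration of the cycling:

batch_size, num_samples = 2, 4
starts = [i * batch_size % num_samples for i in range(6)]
print(starts)   # [0, 2, 0, 2, 0, 2] -- batches [0:2] and [2:4], repeating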
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n <function token>\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[None, self.sample_weight_2]), steps=2)\n self.assertAllClose(eval_result, self.\n 
expected_batch_result_with_weights_output_2, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = 
model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n 
sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
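Each successive string in this record's steps list abstracts the same test file a little further, replacing member functions with <function token> and eliding imports, docstrings, and trailing module code behind <import token>, <docstring token>, and <code token>. The pipeline that produces these placeholders is not part of the record; the following is only a rough sketch of how a similar elision could be done with the standard ast module (Python 3.9+ for ast.unparse; note the real steps drop the whole def, not just its body):

import ast

SOURCE = '''\
def kept(x):
    return x * 2
'''

class BodyEraser(ast.NodeTransformer):
    # Swap each function body for a single placeholder expression.
    def visit_FunctionDef(self, node):
        node.body = [ast.Expr(value=ast.Constant(value='<function token>'))]
        return node

tree = ast.fix_missing_locations(BodyEraser().visit(ast.parse(SOURCE)))
print(ast.unparse(tree))    # def kept(x): '<function token>'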
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n <function token>\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_2': self.sample_weight_2}, batch_size=2,\n epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n <function token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n 
MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, 
self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def 
test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n <function token>\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_compiled_multi_io_model()\n eval_result = model.evaluate_generator(custom_generator_multi_io(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n <function token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n 
super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n 
self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': 
self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n <function token>\n\n def test_fit(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n 
self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n 
self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = 
self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n 
self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = 
model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = 
self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_compiled_multi_io_model()\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_1': self.class_weight_1, 'output_2': self\n .class_weight_2}, steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n history = model.fit_generator(custom_generator_multi_io(),\n class_weight={'output_2': self.class_weight_2}, steps_per_epoch\n =2, epochs=2)\n for key, value in self.expected_fit_result_with_weights_output_2.items(\n ):\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = 
model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = 
self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = 
self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, 
shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n 
loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = 
model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
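# Editor's note: the expected values hard-coded throughout these tests (mean_squared_error
# 7.5, the weighted variant ~9.286, loss 32.5) follow directly from the frozen all-ones
# model, which maps x -> 3*x. A minimal numpy sketch, using only the inputs, targets and
# weights defined in setUp, that reproduces those numbers:
import numpy as np

x = np.array([1., 2., 3., 4.])
y = np.array([2., 4., 6., 8.])
w = np.array([2., 3., 4., 5.])

pred = 3.0 * x                       # Dense(3, ones) -> Dense(1, ones) sums three copies of x
sq_err = (pred - y) ** 2             # [1, 4, 9, 16]

mse = sq_err.mean()                  # 7.5            -> 'mean_squared_error'
wmse = (w * sq_err).sum() / w.sum()  # 130/14 ~ 9.286 -> 'mean_squared_error_2'
loss = (w * sq_err).sum() / len(x)   # 130/4  = 32.5  -> weighted 'loss' (SUM_OVER_BATCH_SIZE)

print(mse, wmse, loss)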
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = 
self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def 
setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = 
model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
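# Editor's note: _custom_generator above is the standard infinite "(x, y, sample_weight)"
# generator that fit_generator/evaluate_generator consume. A self-contained sketch of the
# same pattern; the function name and defaults here are illustrative, not from the test file:
import numpy as np

def batch_generator(x, y, w=None, batch_size=2):
    num_samples = len(x)
    i = 0
    while True:                                 # fit_generator expects an endless stream
        start = i * batch_size % num_samples    # wrap around the dataset
        end = start + batch_size
        i += 1
        if w is None:
            yield x[start:end], y[start:end]    # 2-tuple: unweighted batch
        else:
            yield x[start:end], y[start:end], w[start:end]  # 3-tuple carries per-sample weights

gen = batch_generator(np.arange(4.), 2 * np.arange(4.), w=np.array([2., 3., 4., 5.]))
xb, yb, wb = next(gen)                          # first batch: x[0:2], y[0:2], w[0:2]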
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n\n def setUp(self):\n super(TestMetricsCorrectnessSingleIO, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.class_weight = {(2): 2, (4): 3, (6): 4, (8): 5}\n wmse = 'mean_squared_error_2'\n if not tf2.enabled():\n wmse = 'weighted_' + wmse\n self.expected_fit_result_with_weights = {'mean_squared_error': [7.5,\n 7.5], wmse: [9.286, 9.286], 'loss': [32.5, 32.5]}\n self.expected_fit_result = {'mean_squared_error': [7.5, 7.5], wmse:\n [7.5, 7.5], 'loss': [7.5, 7.5]}\n self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]\n self.expected_batch_result = [7.5, 7.5, 7.5]\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = 
self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = 
{loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
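# Editor's note: test_fit_with_class_weight reuses the *weighted* expectations because
# class_weight={2: 2, 4: 3, 6: 4, 8: 5}, keyed on the targets y=[2, 4, 6, 8], resolves to
# exactly the same per-sample weights as sample_weight=[2, 3, 4, 5]. A sketch of that
# lookup, assuming integer-valued targets as in setUp:
import numpy as np

y = np.array([2., 4., 6., 8.])
class_weight = {2: 2, 4: 3, 6: 4, 8: 5}

sample_weight = np.array([class_weight[int(t)] for t in y])
print(sample_weight)  # [2 3 4 5] -- identical to self.sample_weight above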
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n <function token>\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_test_on_batch(self):\n model = self._get_model()\n result = 
model.test_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history 
= model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
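# Editor's note: TestOutputLossMetrics sweeps the loss reductions. Its setUp numbers can be
# reproduced by hand: with batch_size=2, SUM_OVER_BATCH_SIZE (and AUTO) report the weighted
# sum divided by the batch size, while SUM reports the raw weighted sum per batch, so the
# history value is the per-epoch mean of those per-batch results. A numpy sketch for
# output_1, using only the values from setUp:
import numpy as np

x = np.array([1., 2., 3., 4.])
pred = 3.0 * x                                  # the frozen all-ones model again
y1 = np.array([2., 4., 6., 8.])
w1 = np.array([2., 3., 4., 5.])

weighted = w1 * (pred - y1) ** 2                # [2, 12, 36, 80]
batches = weighted.reshape(2, 2)                # two batches of size 2

per_batch_sobs = batches.sum(axis=1) / 2        # [7, 58]
per_batch_sum = batches.sum(axis=1)             # [14, 116]

print(per_batch_sobs.mean())                    # 32.5 -> output_1_loss under SUM_OVER_BATCH_SIZE/AUTO
print(per_batch_sum.mean())                     # 65.0 -> output_1_loss under SUM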
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n <function token>\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n 
model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, 
epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
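# Editor's note: test_eval_with_sample_weight also asserts that the weighted metric is
# independent of the evaluation batch size. That holds because Keras metrics accumulate a
# weighted running total and a running weight sum across batches. A sketch of that streaming
# computation under arbitrary batch splits (random stand-in data, not from the test file):
import numpy as np

rng = np.random.default_rng(0)
err = rng.random(50)                  # stand-in for per-sample squared errors
w = rng.random(50)

def streaming_wmean(values, weights, batch_size):
    total = weight_sum = 0.0
    for s in range(0, len(values), batch_size):
        total += (values[s:s + batch_size] * weights[s:s + batch_size]).sum()
        weight_sum += weights[s:s + batch_size].sum()
    return total / weight_sum

assert np.isclose(streaming_wmean(err, w, 5), streaming_wmean(err, w, 10))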
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n <function token>\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_eval_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate(self.x, self.y, batch_size=2,\n sample_weight=self.sample_weight)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n x = np.random.random((50, 1))\n y = np.random.random((50, 1))\n w = np.random.random((50,))\n mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]\n mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]\n self.assertAllClose(mse1, mse2, 0.001)\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, 
self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], 
value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
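# Editor's note: each successive entry above redacts more structure into placeholders such
# as <function token>, <class token> and <code token>. A hypothetical sketch of how one such
# redaction step could be produced with the standard ast module (Python 3.8+ for
# end_lineno); the placeholder text and this whole approach are assumptions, not taken from
# the dataset's actual tooling:
import ast

def redact_functions(source):
    tree = ast.parse(source)
    lines = source.splitlines()
    for node in ast.walk(tree):
        if isinstance(node, ast.FunctionDef):
            for i in range(node.lineno - 1, node.end_lineno):
                lines[i] = None                  # blank out the whole def
            lines[node.lineno - 1] = '<function token>'
    return '\n'.join(l for l in lines if l is not None)

print(redact_functions('def f():\n    return 1\nx = 2\n'))  # -> '<function token>\nx = 2'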
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n <function token>\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_fit_generator(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for 
key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\n@parameterized.parameters([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n 
result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n <function token>\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_generator_with_class_weight(self):\n model = self._get_model()\n history = 
model.fit_generator(self._custom_generator(),\n steps_per_epoch=2, epochs=2, class_weight=self.class_weight)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\n@parameterized.parameters([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = 
self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n <function token>\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_class_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, class_weight=self.class_weight,\n batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = 
model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\n@parameterized.parameters([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n 
MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n <function token>\n\n def test_fit(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, batch_size=2, epochs=2, shuffle\n =False)\n for key, value in self.expected_fit_result.items():\n self.assertAllClose(history.history[key], value, 0.001)\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), 
steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\n@parameterized.parameters([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in 
self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def _custom_generator(self, sample_weight=None):\n batch_size = 2\n num_samples = 4\n x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n y = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n w = sample_weight\n i = 0\n while True:\n batch_index = i * batch_size % num_samples\n i += 1\n start = batch_index\n end = start + batch_size\n yield x[start:end], y[start:end], None if w is None else w[start\n :end]\n <function token>\n <function token>\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email 
protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n 
MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n <function token>\n <function token>\n <function token>\n\n def test_fit_with_sample_weight(self):\n model = self._get_model()\n history = model.fit(self.x, self.y, sample_weight=self.\n sample_weight, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n 
experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = 
self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n\n def test_train_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], 
[4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n\n def _get_model(self):\n x = layers.Dense(3, kernel_initializer='ones', trainable=False)\n out = layers.Dense(1, kernel_initializer='ones', name='output',\n trainable=False)\n model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))\n model.compile(optimizer='rmsprop', loss='mse', metrics=[metrics.\n MeanSquaredError(name='mean_squared_error')], weighted_metrics=\n [metrics.MeanSquaredError(name='mean_squared_error_2')],\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n <function token>\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 
'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n <function token>\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_test_on_batch_with_sample_weight(self):\n model = self._get_model()\n result = model.test_on_batch(self.x, self.y, sample_weight=self.\n sample_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n 
loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n <function token>\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n <function token>\n <function token>\n\n def test_fit_generator_with_sample_weight(self):\n model = self._get_model()\n history = model.fit_generator(self._custom_generator(sample_weight=\n self.sample_weight), steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result_with_weights.items():\n self.assertAllClose(history.history[key], value, 0.001)\n <function token>\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = 
model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n <function token>\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_eval_generator(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result, 0.001)\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, 
reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n <function token>\n\n def test_train_on_batch_with_class_weight(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y, class_weight=self.\n class_weight)\n self.assertAllClose(result, self.expected_batch_result_with_weights,\n 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, 
sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_eval_generator_with_sample_weight(self):\n model = self._get_model()\n eval_result = model.evaluate_generator(self._custom_generator(\n sample_weight=self.sample_weight), steps=2)\n self.assertAllClose(eval_result, self.\n expected_batch_result_with_weights, 0.001)\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = 
self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self):\n model = self._get_model()\n result = model.train_on_batch(self.x, self.y)\n self.assertAllClose(result, self.expected_batch_result, 0.001)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = 
self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n 
reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],\n batch_size=2, sample_weight={'output_1': self.sample_weight_1,\n 'output_2': self.sample_weight_2})\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x 
in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n <function token>\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_test_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n 
steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n <function token>\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n <function token>\n\n def test_fit_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps_per_epoch=2, epochs=2)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, 
self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n\n def _get_compiled_multi_io_model(self, loss):\n model = get_multi_io_model()\n model.compile(optimizer='rmsprop', loss=loss, run_eagerly=\n testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function()\n )\n return model\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n <function token>\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n <function token>\n <function token>\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n <function token>\n\n def setUp(self):\n super(TestOutputLossMetrics, self).setUp()\n self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0]])\n self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0]])\n self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0])\n self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5])\n sum_over_batch_size_fit_result = {'loss': [67.5, 67.5],\n 'output_1_loss': [32.5, 32.5], 'output_2_loss': [35, 35]}\n self.expected_fit_result = {loss_reduction.ReductionV2.NONE:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.SUM:\n {'loss': [135, 135], 'output_1_loss': [65, 65], 'output_2_loss':\n [70, 70]}, loss_reduction.ReductionV2.AUTO:\n sum_over_batch_size_fit_result, loss_reduction.ReductionV2.\n SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result}\n self.expected_batch_result = {loss_reduction.ReductionV2.NONE: [\n 67.5, 32.5, 35], loss_reduction.ReductionV2.SUM: [135, 65, 70],\n loss_reduction.ReductionV2.AUTO: [67.5, 32.5, 35],\n loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE: [67.5, 32.5, 35]}\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n <function token>\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n <function token>\n <function token>\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n <function token>\n <function token>\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n <function token>\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n <function token>\n <function token>\n\n def test_eval_generator(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n eval_result = model.evaluate_generator(custom_generator_multi_io(\n sample_weights=[self.sample_weight_1, self.sample_weight_2]),\n steps=2)\n self.assertAllClose(eval_result, self.expected_batch_result[reduction])\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n <function token>\n <function token>\n\n def test_fit(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n history = model.fit([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False)\n for key, value in self.expected_fit_result[reduction].items():\n self.assertAllClose(history.history[key], value)\n <function token>\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_train_on_batch(self, reduction):\n model = self._get_compiled_multi_io_model(loss=losses.\n MeanSquaredError(reduction=reduction))\n result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],\n sample_weight={'output_1': self.sample_weight_1, 'output_2':\n self.sample_weight_2})\n expected_values = self.expected_batch_result[reduction]\n if reduction == loss_reduction.ReductionV2.SUM:\n expected_values = [(x * 2) for x in self.expected_batch_result[\n reduction]]\n self.assertAllClose(result, expected_values)\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\n@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])\n@keras_parameterized.run_all_keras_modes\[email protected]([loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,\n loss_reduction.ReductionV2.AUTO, loss_reduction.ReductionV2.SUM])\nclass TestOutputLossMetrics(keras_parameterized.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<code token>\n"
] | false |
98,428 |
0aad52a73ee8b49e57ebf502ac61b8127921d090
|
'''
EXERCISE
We too, like , want to see the rain in Africa. But since we don't have the
budget to travel that far, we have brought you a map of annual precipitation
(pre_mean--ssa.tif)...
6. What is the mean precipitation value?
'''
import imageio
imagen = "pre_mean_ssa.tif\\pre_mean--SSA.tif"
# load the image as a pixel array
mdt_imagen = imageio.imread(imagen)
print(f"What is the mean precipitation value? = {mdt_imagen.mean()}")
print(f"What is the minimum precipitation value? = {mdt_imagen.min()}")
print(f"What is the maximum precipitation value? = {mdt_imagen.max()}")
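# A hedged aside (not part of the original exercise): GeoTIFF rasters often
# mark missing pixels with a nodata sentinel (e.g. -9999), which would skew
# mean/min/max. A minimal sketch, assuming such a sentinel is used here:
valid = mdt_imagen[mdt_imagen > -9999]  # keep only plausible precipitation values
print(f"Mean over valid pixels only = {valid.mean()}")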
|
[
"'''\r\nEJERCICIO\r\nNotros también, como , queremos ver la lluvia en África. Pero como no tenemos\r\npresupuesto para irnos tan lejos, os hemos traído un mapa de precipitaciones\r\nanuales (pre_mean--ssa.tif)...\r\n\r\n6. ¿Cuál es el valor medio de precipitaciones?\r\n'''\r\nimport matplotlib\r\nfrom matplotlib import image\r\nfrom matplotlib import pyplot\r\nimport numpy as np\r\nimport imageio\r\nimport matplotlib.pyplot as plt\r\n\r\nimagen = \"pre_mean_ssa.tif\\\\pre_mean--SSA.tif\"\r\n# cargamos la imagen como un array de pixeles\r\nmdt_imagen = imageio.imread(imagen)\r\n\r\n\r\nprint(f\"¿Cuál es el valor medio de las precipitaciones? = {mdt_imagen.mean()}\")\r\nprint(f\"¿Cuál es el valor mínimo de las precipitaciones? = {mdt_imagen.min()}\")\r\nprint(f\"¿Cuál es el valor máximo de las precipitaciones? = {mdt_imagen.max()}\")\r\n",
"<docstring token>\nimport matplotlib\nfrom matplotlib import image\nfrom matplotlib import pyplot\nimport numpy as np\nimport imageio\nimport matplotlib.pyplot as plt\nimagen = 'pre_mean_ssa.tif\\\\pre_mean--SSA.tif'\nmdt_imagen = imageio.imread(imagen)\nprint(f'¿Cuál es el valor medio de las precipitaciones? = {mdt_imagen.mean()}')\nprint(f'¿Cuál es el valor mínimo de las precipitaciones? = {mdt_imagen.min()}')\nprint(f'¿Cuál es el valor máximo de las precipitaciones? = {mdt_imagen.max()}')\n",
"<docstring token>\n<import token>\nimagen = 'pre_mean_ssa.tif\\\\pre_mean--SSA.tif'\nmdt_imagen = imageio.imread(imagen)\nprint(f'¿Cuál es el valor medio de las precipitaciones? = {mdt_imagen.mean()}')\nprint(f'¿Cuál es el valor mínimo de las precipitaciones? = {mdt_imagen.min()}')\nprint(f'¿Cuál es el valor máximo de las precipitaciones? = {mdt_imagen.max()}')\n",
"<docstring token>\n<import token>\n<assignment token>\nprint(f'¿Cuál es el valor medio de las precipitaciones? = {mdt_imagen.mean()}')\nprint(f'¿Cuál es el valor mínimo de las precipitaciones? = {mdt_imagen.min()}')\nprint(f'¿Cuál es el valor máximo de las precipitaciones? = {mdt_imagen.max()}')\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
98,429 |
1531662a21eddce05cbcb6647f5620e68338bcc7
|
from ddtrace.settings import Config, IntegrationConfig, HttpConfig
class TestHttpConfig(object):
def test_trace_headers(self):
http_config = HttpConfig()
http_config.trace_headers('some_header')
assert http_config.header_is_traced('some_header')
assert not http_config.header_is_traced('some_other_header')
def test_trace_headers_whitelist_case_insensitive(self):
http_config = HttpConfig()
http_config.trace_headers('some_header')
assert http_config.header_is_traced('sOmE_hEaDeR')
assert not http_config.header_is_traced('some_other_header')
def test_trace_multiple_headers(self):
http_config = HttpConfig()
http_config.trace_headers(['some_header_1', 'some_header_2'])
assert http_config.header_is_traced('some_header_1')
assert http_config.header_is_traced('some_header_2')
assert not http_config.header_is_traced('some_header_3')
def test_empty_entry_do_not_raise_exception(self):
http_config = HttpConfig()
http_config.trace_headers('')
assert not http_config.header_is_traced('some_header_1')
def test_none_entry_do_not_raise_exception(self):
http_config = HttpConfig()
http_config.trace_headers(None)
assert not http_config.header_is_traced('some_header_1')
def test_is_header_tracing_configured(self):
http_config = HttpConfig()
assert not http_config.is_header_tracing_configured
http_config.trace_headers('some_header')
assert http_config.is_header_tracing_configured
def test_header_is_traced_case_insensitive(self):
http_config = HttpConfig()
http_config.trace_headers('sOmE_hEaDeR')
assert http_config.header_is_traced('SoMe_HeAdEr')
assert not http_config.header_is_traced('some_other_header')
def test_header_is_traced_false_for_empty_header(self):
http_config = HttpConfig()
http_config.trace_headers('some_header')
assert not http_config.header_is_traced('')
def test_header_is_traced_false_for_none_header(self):
http_config = HttpConfig()
http_config.trace_headers('some_header')
assert not http_config.header_is_traced(None)
class TestIntegrationConfig(object):
def test_is_a_dict(self):
integration_config = IntegrationConfig(Config())
assert isinstance(integration_config, dict)
def test_allow_configuring_http(self):
global_config = Config()
integration_config = IntegrationConfig(global_config)
integration_config.http.trace_headers('integration_header')
assert integration_config.http.header_is_traced('integration_header')
assert not integration_config.http.header_is_traced('other_header')
def test_allow_exist_both_global_and_integration_config(self):
global_config = Config()
integration_config = IntegrationConfig(global_config)
global_config.trace_headers('global_header')
assert integration_config.header_is_traced('global_header')
integration_config.http.trace_headers('integration_header')
assert integration_config.header_is_traced('integration_header')
assert not integration_config.header_is_traced('global_header')
assert not global_config.header_is_traced('integration_header')
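# Illustrative usage of the API exercised above (a sketch that mirrors the
# assertions in this file rather than documenting new behavior): header
# tracing is an opt-in whitelist, matched case-insensitively.
if __name__ == '__main__':
    http_config = HttpConfig()
    http_config.trace_headers(['User-Agent', 'X-Request-Id'])
    assert http_config.header_is_traced('user-agent')         # case-insensitive
    assert not http_config.header_is_traced('Authorization')  # not whitelisted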
|
[
"from ddtrace.settings import Config, IntegrationConfig, HttpConfig\n\n\nclass TestHttpConfig(object):\n\n def test_trace_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('some_header')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_headers_whitelist_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('sOmE_hEaDeR')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_multiple_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers(['some_header_1', 'some_header_2'])\n assert http_config.header_is_traced('some_header_1')\n assert http_config.header_is_traced('some_header_2')\n assert not http_config.header_is_traced('some_header_3')\n\n def test_empty_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers('')\n assert not http_config.header_is_traced('some_header_1')\n\n def test_none_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers(None)\n assert not http_config.header_is_traced('some_header_1')\n\n def test_is_header_tracing_configured(self):\n http_config = HttpConfig()\n assert not http_config.is_header_tracing_configured\n http_config.trace_headers('some_header')\n assert http_config.is_header_tracing_configured\n\n def test_header_is_traced_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('sOmE_hEaDeR')\n assert http_config.header_is_traced('SoMe_HeAdEr')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_header_is_traced_false_for_empty_header(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert not http_config.header_is_traced('')\n\n def test_header_is_traced_false_for_none_header(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert not http_config.header_is_traced(None)\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"from ddtrace.settings import Config, IntegrationConfig, HttpConfig\n\n\nclass TestHttpConfig(object):\n\n def test_trace_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('some_header')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_headers_whitelist_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('sOmE_hEaDeR')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_multiple_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers(['some_header_1', 'some_header_2'])\n assert http_config.header_is_traced('some_header_1')\n assert http_config.header_is_traced('some_header_2')\n assert not http_config.header_is_traced('some_header_3')\n\n def test_empty_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers('')\n assert not http_config.header_is_traced('some_header_1')\n\n def test_none_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers(None)\n assert not http_config.header_is_traced('some_header_1')\n\n def test_is_header_tracing_configured(self):\n http_config = HttpConfig()\n assert not http_config.is_header_tracing_configured\n http_config.trace_headers('some_header')\n assert http_config.is_header_tracing_configured\n\n def test_header_is_traced_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('sOmE_hEaDeR')\n assert http_config.header_is_traced('SoMe_HeAdEr')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_header_is_traced_false_for_empty_header(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert not http_config.header_is_traced('')\n\n def test_header_is_traced_false_for_none_header(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert not http_config.header_is_traced(None)\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n\n\nclass TestHttpConfig(object):\n\n def test_trace_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('some_header')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_headers_whitelist_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('sOmE_hEaDeR')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_multiple_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers(['some_header_1', 'some_header_2'])\n assert http_config.header_is_traced('some_header_1')\n assert http_config.header_is_traced('some_header_2')\n assert not http_config.header_is_traced('some_header_3')\n\n def test_empty_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers('')\n assert not http_config.header_is_traced('some_header_1')\n\n def test_none_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers(None)\n assert not http_config.header_is_traced('some_header_1')\n\n def test_is_header_tracing_configured(self):\n http_config = HttpConfig()\n assert not http_config.is_header_tracing_configured\n http_config.trace_headers('some_header')\n assert http_config.is_header_tracing_configured\n\n def test_header_is_traced_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('sOmE_hEaDeR')\n assert http_config.header_is_traced('SoMe_HeAdEr')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_header_is_traced_false_for_empty_header(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert not http_config.header_is_traced('')\n\n def test_header_is_traced_false_for_none_header(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert not http_config.header_is_traced(None)\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n\n\nclass TestHttpConfig(object):\n\n def test_trace_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('some_header')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_headers_whitelist_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('sOmE_hEaDeR')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_multiple_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers(['some_header_1', 'some_header_2'])\n assert http_config.header_is_traced('some_header_1')\n assert http_config.header_is_traced('some_header_2')\n assert not http_config.header_is_traced('some_header_3')\n\n def test_empty_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers('')\n assert not http_config.header_is_traced('some_header_1')\n\n def test_none_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers(None)\n assert not http_config.header_is_traced('some_header_1')\n\n def test_is_header_tracing_configured(self):\n http_config = HttpConfig()\n assert not http_config.is_header_tracing_configured\n http_config.trace_headers('some_header')\n assert http_config.is_header_tracing_configured\n\n def test_header_is_traced_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('sOmE_hEaDeR')\n assert http_config.header_is_traced('SoMe_HeAdEr')\n assert not http_config.header_is_traced('some_other_header')\n <function token>\n\n def test_header_is_traced_false_for_none_header(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert not http_config.header_is_traced(None)\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n\n\nclass TestHttpConfig(object):\n\n def test_trace_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('some_header')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_headers_whitelist_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('sOmE_hEaDeR')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_multiple_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers(['some_header_1', 'some_header_2'])\n assert http_config.header_is_traced('some_header_1')\n assert http_config.header_is_traced('some_header_2')\n assert not http_config.header_is_traced('some_header_3')\n\n def test_empty_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers('')\n assert not http_config.header_is_traced('some_header_1')\n\n def test_none_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers(None)\n assert not http_config.header_is_traced('some_header_1')\n\n def test_is_header_tracing_configured(self):\n http_config = HttpConfig()\n assert not http_config.is_header_tracing_configured\n http_config.trace_headers('some_header')\n assert http_config.is_header_tracing_configured\n <function token>\n <function token>\n\n def test_header_is_traced_false_for_none_header(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert not http_config.header_is_traced(None)\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n\n\nclass TestHttpConfig(object):\n\n def test_trace_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('some_header')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_headers_whitelist_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('sOmE_hEaDeR')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_multiple_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers(['some_header_1', 'some_header_2'])\n assert http_config.header_is_traced('some_header_1')\n assert http_config.header_is_traced('some_header_2')\n assert not http_config.header_is_traced('some_header_3')\n\n def test_empty_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers('')\n assert not http_config.header_is_traced('some_header_1')\n <function token>\n\n def test_is_header_tracing_configured(self):\n http_config = HttpConfig()\n assert not http_config.is_header_tracing_configured\n http_config.trace_headers('some_header')\n assert http_config.is_header_tracing_configured\n <function token>\n <function token>\n\n def test_header_is_traced_false_for_none_header(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert not http_config.header_is_traced(None)\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n\n\nclass TestHttpConfig(object):\n\n def test_trace_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('some_header')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_headers_whitelist_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('sOmE_hEaDeR')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_multiple_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers(['some_header_1', 'some_header_2'])\n assert http_config.header_is_traced('some_header_1')\n assert http_config.header_is_traced('some_header_2')\n assert not http_config.header_is_traced('some_header_3')\n\n def test_empty_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers('')\n assert not http_config.header_is_traced('some_header_1')\n <function token>\n\n def test_is_header_tracing_configured(self):\n http_config = HttpConfig()\n assert not http_config.is_header_tracing_configured\n http_config.trace_headers('some_header')\n assert http_config.is_header_tracing_configured\n <function token>\n <function token>\n <function token>\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n\n\nclass TestHttpConfig(object):\n\n def test_trace_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('some_header')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_headers_whitelist_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('sOmE_hEaDeR')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_multiple_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers(['some_header_1', 'some_header_2'])\n assert http_config.header_is_traced('some_header_1')\n assert http_config.header_is_traced('some_header_2')\n assert not http_config.header_is_traced('some_header_3')\n\n def test_empty_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers('')\n assert not http_config.header_is_traced('some_header_1')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n\n\nclass TestHttpConfig(object):\n\n def test_trace_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('some_header')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_headers_whitelist_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('sOmE_hEaDeR')\n assert not http_config.header_is_traced('some_other_header')\n <function token>\n\n def test_empty_entry_do_not_raise_exception(self):\n http_config = HttpConfig()\n http_config.trace_headers('')\n assert not http_config.header_is_traced('some_header_1')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n\n\nclass TestHttpConfig(object):\n\n def test_trace_headers(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('some_header')\n assert not http_config.header_is_traced('some_other_header')\n\n def test_trace_headers_whitelist_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('sOmE_hEaDeR')\n assert not http_config.header_is_traced('some_other_header')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n\n\nclass TestHttpConfig(object):\n <function token>\n\n def test_trace_headers_whitelist_case_insensitive(self):\n http_config = HttpConfig()\n http_config.trace_headers('some_header')\n assert http_config.header_is_traced('sOmE_hEaDeR')\n assert not http_config.header_is_traced('some_other_header')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n\n\nclass TestHttpConfig(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n<class token>\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n\n def test_allow_exist_both_global_and_integration_config(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n global_config.trace_headers('global_header')\n assert integration_config.header_is_traced('global_header')\n integration_config.http.trace_headers('integration_header')\n assert integration_config.header_is_traced('integration_header')\n assert not integration_config.header_is_traced('global_header')\n assert not global_config.header_is_traced('integration_header')\n",
"<import token>\n<class token>\n\n\nclass TestIntegrationConfig(object):\n\n def test_is_a_dict(self):\n integration_config = IntegrationConfig(Config())\n assert isinstance(integration_config, dict)\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n <function token>\n",
"<import token>\n<class token>\n\n\nclass TestIntegrationConfig(object):\n <function token>\n\n def test_allow_configuring_http(self):\n global_config = Config()\n integration_config = IntegrationConfig(global_config)\n integration_config.http.trace_headers('integration_header')\n assert integration_config.http.header_is_traced('integration_header')\n assert not integration_config.http.header_is_traced('other_header')\n <function token>\n",
"<import token>\n<class token>\n\n\nclass TestIntegrationConfig(object):\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
98,430 |
c5d76b3a799da79ff3dfdcbc471babec36ea47f5
|
from collections import OrderedDict
import mrcfile, argparse, pickle
import time, utils
import numpy as np
"""
Compile all stitched tiles together into a tilt-series and save in MRC format. Tiles are
ordered as projection images were collected unless command line argument reorder is True,
in which case the order is from the most negative to positive tilt angle. Also optionally
save a tilt angles file (.tlt) for use in downstream reconstruction.
"""
def parse_commandline():
"""
Parse commandline input.
"""
    parser = argparse.ArgumentParser(description='Generate a tilt-series from a set of stitched tiles.')
parser.add_argument('-i','--stitched_prefix', help='Path to prefix file name of stitched images',
required=True, type=str)
parser.add_argument('-v','--voxel_path', help='Path to MRC file with voxel size in header',
required=False, type=str)
parser.add_argument('-c','--centers', help='Path to input beam centers file',
required=True, type=str)
parser.add_argument('-p','--params', help='Path to circle-fitting parameters dictionary',
required=True, type=str)
parser.add_argument('-o','--output', help='Output path for tilt stack',
required=True, type=str)
parser.add_argument('-w','--width', help='Length in pixels of retained area of each stitched image',
required=True, type=int)
parser.add_argument('-r','--rotation', help='Global rotation to apply to all beam centers',
required=False, type=float, default=0)
parser.add_argument('-re','--reorder', help='Reorder tilts by increasing angle rather than by data collection',
action='store_true') # defaults to False if argument is not supplied
parser.add_argument('-t','--tilt_file', help='Output path for tilt angles file, ordered as tilt series',
required=False, type=str)
parser.add_argument('-e','--exclude_angles', help='List of tilt angles to exclude (space-separated)',
required=False, nargs='+', type=int)
return vars(parser.parse_args())
def modify_args(args):
"""
Modify command line arguments and add additional information to keys.
"""
if args['voxel_path'] is None:
args['voxel_path'] = args['stitched_prefix'] + "0.mrc"
mrc = mrcfile.open(args['voxel_path'])
args['voxel_size'] = float(mrc.voxel_size.x) # Angstrom / pixel
mrc.close()
    with open(args['params'], "rb") as fh:
        args['params'] = pickle.load(fh)
if args['exclude_angles'] is None:
args['exclude_angles'] = np.array(list())
return args
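# Note on units (a worked example grounded in retrieve_beam_centers below):
# with voxel_size = 2.0 A/pixel, a beam-center coordinate of 1 um converts to
# 1e4 / 2.0 = 5000 pixels, since 1 um = 1e4 Angstrom.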
def retrieve_beam_centers(centers_file, voxel_size):
"""
Retrieve the position of the central tile for each tilt angle and convert from
microns to pixels
Parameters
----------
centers_file : string
filename of predicted beam centers, where x,y coordinates of each
tile are listed in um on separate lines after the relevant tilt angle
voxel_size : float
voxel dimensions in A/pixel
Returns
-------
origin_shifts : numpy.ndarray, shape (N, 2)
global origin shifts applied to each tilt angle
tilt_angles : numpy.ndarray, shape (N,)
tilt angles ordered as images were collected
"""
    origin_shifts, tilt_angles = list(), list()
    counter = 0  # guard in case a coordinate line precedes the first tilt angle
    with open(centers_file, 'r') as f:
        content = f.readlines()
    # extract position of tile 0 for each tilt angle
for line in content:
as_array = np.array(line.strip().split()).astype(float)
if (len(as_array) == 1):
tilt_angles.append(as_array[0])
counter = 0
elif (len(as_array) >= 2) and (counter==0):
origin_shifts.append(as_array * 1e4/voxel_size)
counter += 1
origin_shifts = np.array(origin_shifts)[:,:2]
return origin_shifts, np.array(tilt_angles)
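# For reference, a sketch of the centers-file layout implied by the parser
# above (numbers are illustrative, not taken from real data):
#   -60.0
#   1.25  0.40
#   2.10  1.80
#   -57.0
#   ...
# i.e. one tilt angle per line, followed by x y tile centers in microns;
# only the first center (tile 0) per angle is kept as the origin shift.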
def stack_stitched(args):
"""
Crop and then stack stitched tilt images into a tilt-series, accounting
for the global offset in the tile positions between tilt angles.
Parameters
----------
args: dictionary
command line arguments
Returns
-------
tilt_series : numpy.ndarray, shape (n_tilts, N, N)
coarsely-aligned tilt-stack
tilts : numpy.ndarray, shape (N,)
tilt angles ordered as images in tilt_series
"""
# retrieve origin shifts and tilt angles
shifts, all_tilts = retrieve_beam_centers(args['centers'], args['voxel_size'])
shifts = utils.apply_rotation(shifts, args['rotation'])
shifts = np.fliplr(shifts)
t_shifts = OrderedDict((key,val) for key,val in zip(all_tilts,shifts))
# retrieve all processed tilts and optionally reorder tilt series by angle
tilts = np.array(list(args['params'].keys()))
if args['reorder'] is True:
tilts = np.array(sorted(tilts))
retained_tilts = np.setdiff1d(tilts, args['exclude_angles'])
# translational offsets due to spiral
offsets = np.array([t_shifts[xi] for xi in retained_tilts])
# generate empty tilt-series array
tilt_series = np.zeros((len(retained_tilts),args['width']*2,args['width']*2))
for xi,t in enumerate(retained_tilts):
# load image and retrieve center coordinates
image = mrcfile.open(args['stitched_prefix'] + f"{int(t)}.mrc").data
xc, yc = (np.array(image.shape)/2).astype(int)
# spiral translational offsets, accounting for change due to projection
x_offset, y_offset = np.around(offsets[xi]).astype(int)
x_offset *= np.cos(np.deg2rad(t))
x_offset = np.around(x_offset).astype(int)
tilt_series[xi] = image[xc-args['width']-x_offset: xc+args['width']-x_offset,
yc-args['width']-y_offset: yc+args['width']-y_offset]
return tilt_series, retained_tilts
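# Worked example of the projection correction applied above (illustrative
# numbers): a 100 px x offset at t = 60 degrees foreshortens to
# 100 * cos(60 deg) = 50 px in the projected image, while the y offset
# (along the tilt axis) is left unchanged.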
def main():
start_time = time.time()
args = parse_commandline()
args = modify_args(args)
# generate tilt stack and save to .mrc format
tseries, retained_tilts = stack_stitched(args)
utils.save_mrc(tseries, args['output'], args['voxel_size'])
# optionally save a corresponding .tlt file for IMOD
if args['tilt_file'] is not None:
np.savetxt(args['tilt_file'], retained_tilts, fmt="%i", delimiter='\n')
print(f"elapsed time is {((time.time()-start_time)/60.0):.2f} minutes")
if __name__ == '__main__':
main()
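# Example invocation (a sketch; the script name, file names, and values are
# hypothetical, not taken from the original document):
#   python stack_stitched.py -i stitched/tilt_ -c beam_centers.txt \
#       -p circle_params.pickle -o tilt_series.mrc -w 2048 -re \
#       -t tilt_series.tlt -e -60 60
# This crops a (2*width) x (2*width) region from each stitched image,
# reorders tilts by angle, excludes -60 and 60 degrees, and writes both the
# MRC stack and a .tlt angles file for downstream reconstruction.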
|
[
"from collections import OrderedDict\nfrom natsort import natsorted\nimport mrcfile, argparse, pickle\nimport glob, time, os, utils\nimport numpy as np\n\n\"\"\"\nCompile all stitched tiles together into a tilt-series and save in MRC format. Tiles are \nordered as projection images were collected unless command line argument reorder is True,\nin which case the order is from the most negative to positive tilt angle. Also optionally \nsave a tilt angles file (.tlt) for use in downstream reconstruction. \n\"\"\"\n\ndef parse_commandline():\n \"\"\"\n Parse commandline input.\n \"\"\"\n parser = argparse.ArgumentParser(description='Generate a tilt-series from a set of sttiched tiles.')\n parser.add_argument('-i','--stitched_prefix', help='Path to prefix file name of stitched images', \n required=True, type=str)\n parser.add_argument('-v','--voxel_path', help='Path to MRC file with voxel size in header',\n required=False, type=str)\n parser.add_argument('-c','--centers', help='Path to input beam centers file',\n required=True, type=str)\n parser.add_argument('-p','--params', help='Path to circle-fitting parameters dictionary',\n required=True, type=str)\n parser.add_argument('-o','--output', help='Output path for tilt stack',\n required=True, type=str)\n parser.add_argument('-w','--width', help='Length in pixels of retained area of each stitched image',\n required=True, type=int)\n parser.add_argument('-r','--rotation', help='Global rotation to apply to all beam centers',\n required=False, type=float, default=0)\n parser.add_argument('-re','--reorder', help='Reorder tilts by increasing angle rather than by data collection',\n action='store_true') # defaults to False if argument is not supplied\n parser.add_argument('-t','--tilt_file', help='Output path for tilt angles file, ordered as tilt series',\n required=False, type=str)\n parser.add_argument('-e','--exclude_angles', help='List of tilt angles to exclude (space-separated)',\n required=False, nargs='+', type=int)\n\n return vars(parser.parse_args())\n\n\ndef modify_args(args):\n \"\"\"\n Modify command line arguments and add additional information to keys.\n \"\"\"\n \n if args['voxel_path'] is None:\n args['voxel_path'] = args['stitched_prefix'] + \"0.mrc\"\n mrc = mrcfile.open(args['voxel_path'])\n args['voxel_size'] = float(mrc.voxel_size.x) # Angstrom / pixel\n mrc.close()\n\n args['params'] = pickle.load(open(args['params'], \"rb\"))\n\n if args['exclude_angles'] is None:\n args['exclude_angles'] = np.array(list())\n\n return args\n\n\ndef retrieve_beam_centers(centers_file, voxel_size):\n \"\"\"\n Retrieve the position of the central tile for each tilt angle and convert from \n microns to pixels\n \n Parameters\n ----------\n centers_file : string \n filename of predicted beam centers, where x,y coordinates of each \n tile are listed in um on separate lines after the relevant tilt angle\n voxel_size : float \n voxel dimensions in A/pixel\n \n Returns\n -------\n origin_shifts : numpy.ndarray, shape (N, 2) \n global origin shifts applied to each tilt angle\n tilt_angles : numpy.ndarray, shape (N,) \n tilt angles ordered as images were collected\n \"\"\"\n \n origin_shifts, tilt_angles = list(), list()\n f = open(centers_file, 'r') \n content = f.readlines() \n \n # extract position of tile 0 for each tilt angle\n for line in content:\n as_array = np.array(line.strip().split()).astype(float)\n if (len(as_array) == 1):\n tilt_angles.append(as_array[0])\n counter = 0\n elif (len(as_array) >= 2) and (counter==0):\n 
origin_shifts.append(as_array * 1e4/voxel_size)\n counter += 1\n \n origin_shifts = np.array(origin_shifts)[:,:2] \n return origin_shifts, np.array(tilt_angles)\n\n\ndef stack_stitched(args):\n \"\"\"\n Crop and then stack stitched tilt images into a tilt-series, accounting\n for the global offset in the tile positions between tilt angles.\n\n Parameters\n ----------\n args: dictionary \n command line arguments\n\n Returns\n -------\n tilt_series : numpy.ndarray, shape (n_tilts, N, N) \n coarsely-aligned tilt-stack\n tilts : numpy.ndarray, shape (N,) \n tilt angles ordered as images in tilt_series\n \"\"\"\n # retrieve origin shifts and tilt angles\n shifts, all_tilts = retrieve_beam_centers(args['centers'], args['voxel_size'])\n shifts = utils.apply_rotation(shifts, args['rotation'])\n shifts = np.fliplr(shifts)\n t_shifts = OrderedDict((key,val) for key,val in zip(all_tilts,shifts))\n \n # retrieve all processed tilts and optionally reorder tilt series by angle\n tilts = np.array(list(args['params'].keys()))\n if args['reorder'] is True:\n tilts = np.array(sorted(tilts))\n retained_tilts = np.setdiff1d(tilts, args['exclude_angles'])\n\n # translational offsets due to spiral\n offsets = np.array([t_shifts[xi] for xi in retained_tilts])\n \n # generate empty tilt-series array\n tilt_series = np.zeros((len(retained_tilts),args['width']*2,args['width']*2))\n \n for xi,t in enumerate(retained_tilts):\n # load image and retrieve center coordinates\n image = mrcfile.open(args['stitched_prefix'] + f\"{int(t)}.mrc\").data\n xc, yc = (np.array(image.shape)/2).astype(int)\n\n # spiral translational offsets, accounting for change due to projection\n x_offset, y_offset = np.around(offsets[xi]).astype(int)\n x_offset *= np.cos(np.deg2rad(t))\n x_offset = np.around(x_offset).astype(int)\n\n tilt_series[xi] = image[xc-args['width']-x_offset: xc+args['width']-x_offset,\n yc-args['width']-y_offset: yc+args['width']-y_offset] \n\n return tilt_series, retained_tilts\n\n\ndef main():\n\n start_time = time.time()\n\n args = parse_commandline()\n args = modify_args(args)\n\n # generate tilt stack and save to .mrc format\n tseries, retained_tilts = stack_stitched(args)\n utils.save_mrc(tseries, args['output'], args['voxel_size'])\n\n # optionally save a corresponding .tlt file for IMOD\n if args['tilt_file'] is not None:\n np.savetxt(args['tilt_file'], retained_tilts, fmt=\"%i\", delimiter='\\n')\n\n print(f\"elapsed time is {((time.time()-start_time)/60.0):.2f} minutes\")\n\n\nif __name__ == '__main__':\n main()\n",
"from collections import OrderedDict\nfrom natsort import natsorted\nimport mrcfile, argparse, pickle\nimport glob, time, os, utils\nimport numpy as np\n<docstring token>\n\n\ndef parse_commandline():\n \"\"\"\n Parse commandline input.\n \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Generate a tilt-series from a set of sttiched tiles.')\n parser.add_argument('-i', '--stitched_prefix', help=\n 'Path to prefix file name of stitched images', required=True, type=str)\n parser.add_argument('-v', '--voxel_path', help=\n 'Path to MRC file with voxel size in header', required=False, type=str)\n parser.add_argument('-c', '--centers', help=\n 'Path to input beam centers file', required=True, type=str)\n parser.add_argument('-p', '--params', help=\n 'Path to circle-fitting parameters dictionary', required=True, type=str\n )\n parser.add_argument('-o', '--output', help='Output path for tilt stack',\n required=True, type=str)\n parser.add_argument('-w', '--width', help=\n 'Length in pixels of retained area of each stitched image',\n required=True, type=int)\n parser.add_argument('-r', '--rotation', help=\n 'Global rotation to apply to all beam centers', required=False,\n type=float, default=0)\n parser.add_argument('-re', '--reorder', help=\n 'Reorder tilts by increasing angle rather than by data collection',\n action='store_true')\n parser.add_argument('-t', '--tilt_file', help=\n 'Output path for tilt angles file, ordered as tilt series',\n required=False, type=str)\n parser.add_argument('-e', '--exclude_angles', help=\n 'List of tilt angles to exclude (space-separated)', required=False,\n nargs='+', type=int)\n return vars(parser.parse_args())\n\n\ndef modify_args(args):\n \"\"\"\n Modify command line arguments and add additional information to keys.\n \"\"\"\n if args['voxel_path'] is None:\n args['voxel_path'] = args['stitched_prefix'] + '0.mrc'\n mrc = mrcfile.open(args['voxel_path'])\n args['voxel_size'] = float(mrc.voxel_size.x)\n mrc.close()\n args['params'] = pickle.load(open(args['params'], 'rb'))\n if args['exclude_angles'] is None:\n args['exclude_angles'] = np.array(list())\n return args\n\n\ndef retrieve_beam_centers(centers_file, voxel_size):\n \"\"\"\n Retrieve the position of the central tile for each tilt angle and convert from \n microns to pixels\n \n Parameters\n ----------\n centers_file : string \n filename of predicted beam centers, where x,y coordinates of each \n tile are listed in um on separate lines after the relevant tilt angle\n voxel_size : float \n voxel dimensions in A/pixel\n \n Returns\n -------\n origin_shifts : numpy.ndarray, shape (N, 2) \n global origin shifts applied to each tilt angle\n tilt_angles : numpy.ndarray, shape (N,) \n tilt angles ordered as images were collected\n \"\"\"\n origin_shifts, tilt_angles = list(), list()\n f = open(centers_file, 'r')\n content = f.readlines()\n for line in content:\n as_array = np.array(line.strip().split()).astype(float)\n if len(as_array) == 1:\n tilt_angles.append(as_array[0])\n counter = 0\n elif len(as_array) >= 2 and counter == 0:\n origin_shifts.append(as_array * 10000.0 / voxel_size)\n counter += 1\n origin_shifts = np.array(origin_shifts)[:, :2]\n return origin_shifts, np.array(tilt_angles)\n\n\ndef stack_stitched(args):\n \"\"\"\n Crop and then stack stitched tilt images into a tilt-series, accounting\n for the global offset in the tile positions between tilt angles.\n\n Parameters\n ----------\n args: dictionary \n command line arguments\n\n Returns\n -------\n tilt_series : numpy.ndarray, shape 
(n_tilts, N, N) \n coarsely-aligned tilt-stack\n tilts : numpy.ndarray, shape (N,) \n tilt angles ordered as images in tilt_series\n \"\"\"\n shifts, all_tilts = retrieve_beam_centers(args['centers'], args[\n 'voxel_size'])\n shifts = utils.apply_rotation(shifts, args['rotation'])\n shifts = np.fliplr(shifts)\n t_shifts = OrderedDict((key, val) for key, val in zip(all_tilts, shifts))\n tilts = np.array(list(args['params'].keys()))\n if args['reorder'] is True:\n tilts = np.array(sorted(tilts))\n retained_tilts = np.setdiff1d(tilts, args['exclude_angles'])\n offsets = np.array([t_shifts[xi] for xi in retained_tilts])\n tilt_series = np.zeros((len(retained_tilts), args['width'] * 2, args[\n 'width'] * 2))\n for xi, t in enumerate(retained_tilts):\n image = mrcfile.open(args['stitched_prefix'] + f'{int(t)}.mrc').data\n xc, yc = (np.array(image.shape) / 2).astype(int)\n x_offset, y_offset = np.around(offsets[xi]).astype(int)\n x_offset *= np.cos(np.deg2rad(t))\n x_offset = np.around(x_offset).astype(int)\n tilt_series[xi] = image[xc - args['width'] - x_offset:xc + args[\n 'width'] - x_offset, yc - args['width'] - y_offset:yc + args[\n 'width'] - y_offset]\n return tilt_series, retained_tilts\n\n\ndef main():\n start_time = time.time()\n args = parse_commandline()\n args = modify_args(args)\n tseries, retained_tilts = stack_stitched(args)\n utils.save_mrc(tseries, args['output'], args['voxel_size'])\n if args['tilt_file'] is not None:\n np.savetxt(args['tilt_file'], retained_tilts, fmt='%i', delimiter='\\n')\n print(f'elapsed time is {(time.time() - start_time) / 60.0:.2f} minutes')\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<docstring token>\n\n\ndef parse_commandline():\n \"\"\"\n Parse commandline input.\n \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Generate a tilt-series from a set of sttiched tiles.')\n parser.add_argument('-i', '--stitched_prefix', help=\n 'Path to prefix file name of stitched images', required=True, type=str)\n parser.add_argument('-v', '--voxel_path', help=\n 'Path to MRC file with voxel size in header', required=False, type=str)\n parser.add_argument('-c', '--centers', help=\n 'Path to input beam centers file', required=True, type=str)\n parser.add_argument('-p', '--params', help=\n 'Path to circle-fitting parameters dictionary', required=True, type=str\n )\n parser.add_argument('-o', '--output', help='Output path for tilt stack',\n required=True, type=str)\n parser.add_argument('-w', '--width', help=\n 'Length in pixels of retained area of each stitched image',\n required=True, type=int)\n parser.add_argument('-r', '--rotation', help=\n 'Global rotation to apply to all beam centers', required=False,\n type=float, default=0)\n parser.add_argument('-re', '--reorder', help=\n 'Reorder tilts by increasing angle rather than by data collection',\n action='store_true')\n parser.add_argument('-t', '--tilt_file', help=\n 'Output path for tilt angles file, ordered as tilt series',\n required=False, type=str)\n parser.add_argument('-e', '--exclude_angles', help=\n 'List of tilt angles to exclude (space-separated)', required=False,\n nargs='+', type=int)\n return vars(parser.parse_args())\n\n\ndef modify_args(args):\n \"\"\"\n Modify command line arguments and add additional information to keys.\n \"\"\"\n if args['voxel_path'] is None:\n args['voxel_path'] = args['stitched_prefix'] + '0.mrc'\n mrc = mrcfile.open(args['voxel_path'])\n args['voxel_size'] = float(mrc.voxel_size.x)\n mrc.close()\n args['params'] = pickle.load(open(args['params'], 'rb'))\n if args['exclude_angles'] is None:\n args['exclude_angles'] = np.array(list())\n return args\n\n\ndef retrieve_beam_centers(centers_file, voxel_size):\n \"\"\"\n Retrieve the position of the central tile for each tilt angle and convert from \n microns to pixels\n \n Parameters\n ----------\n centers_file : string \n filename of predicted beam centers, where x,y coordinates of each \n tile are listed in um on separate lines after the relevant tilt angle\n voxel_size : float \n voxel dimensions in A/pixel\n \n Returns\n -------\n origin_shifts : numpy.ndarray, shape (N, 2) \n global origin shifts applied to each tilt angle\n tilt_angles : numpy.ndarray, shape (N,) \n tilt angles ordered as images were collected\n \"\"\"\n origin_shifts, tilt_angles = list(), list()\n f = open(centers_file, 'r')\n content = f.readlines()\n for line in content:\n as_array = np.array(line.strip().split()).astype(float)\n if len(as_array) == 1:\n tilt_angles.append(as_array[0])\n counter = 0\n elif len(as_array) >= 2 and counter == 0:\n origin_shifts.append(as_array * 10000.0 / voxel_size)\n counter += 1\n origin_shifts = np.array(origin_shifts)[:, :2]\n return origin_shifts, np.array(tilt_angles)\n\n\ndef stack_stitched(args):\n \"\"\"\n Crop and then stack stitched tilt images into a tilt-series, accounting\n for the global offset in the tile positions between tilt angles.\n\n Parameters\n ----------\n args: dictionary \n command line arguments\n\n Returns\n -------\n tilt_series : numpy.ndarray, shape (n_tilts, N, N) \n coarsely-aligned tilt-stack\n tilts : numpy.ndarray, shape (N,) \n tilt angles ordered as images in tilt_series\n 
\"\"\"\n shifts, all_tilts = retrieve_beam_centers(args['centers'], args[\n 'voxel_size'])\n shifts = utils.apply_rotation(shifts, args['rotation'])\n shifts = np.fliplr(shifts)\n t_shifts = OrderedDict((key, val) for key, val in zip(all_tilts, shifts))\n tilts = np.array(list(args['params'].keys()))\n if args['reorder'] is True:\n tilts = np.array(sorted(tilts))\n retained_tilts = np.setdiff1d(tilts, args['exclude_angles'])\n offsets = np.array([t_shifts[xi] for xi in retained_tilts])\n tilt_series = np.zeros((len(retained_tilts), args['width'] * 2, args[\n 'width'] * 2))\n for xi, t in enumerate(retained_tilts):\n image = mrcfile.open(args['stitched_prefix'] + f'{int(t)}.mrc').data\n xc, yc = (np.array(image.shape) / 2).astype(int)\n x_offset, y_offset = np.around(offsets[xi]).astype(int)\n x_offset *= np.cos(np.deg2rad(t))\n x_offset = np.around(x_offset).astype(int)\n tilt_series[xi] = image[xc - args['width'] - x_offset:xc + args[\n 'width'] - x_offset, yc - args['width'] - y_offset:yc + args[\n 'width'] - y_offset]\n return tilt_series, retained_tilts\n\n\ndef main():\n start_time = time.time()\n args = parse_commandline()\n args = modify_args(args)\n tseries, retained_tilts = stack_stitched(args)\n utils.save_mrc(tseries, args['output'], args['voxel_size'])\n if args['tilt_file'] is not None:\n np.savetxt(args['tilt_file'], retained_tilts, fmt='%i', delimiter='\\n')\n print(f'elapsed time is {(time.time() - start_time) / 60.0:.2f} minutes')\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<docstring token>\n\n\ndef parse_commandline():\n \"\"\"\n Parse commandline input.\n \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Generate a tilt-series from a set of sttiched tiles.')\n parser.add_argument('-i', '--stitched_prefix', help=\n 'Path to prefix file name of stitched images', required=True, type=str)\n parser.add_argument('-v', '--voxel_path', help=\n 'Path to MRC file with voxel size in header', required=False, type=str)\n parser.add_argument('-c', '--centers', help=\n 'Path to input beam centers file', required=True, type=str)\n parser.add_argument('-p', '--params', help=\n 'Path to circle-fitting parameters dictionary', required=True, type=str\n )\n parser.add_argument('-o', '--output', help='Output path for tilt stack',\n required=True, type=str)\n parser.add_argument('-w', '--width', help=\n 'Length in pixels of retained area of each stitched image',\n required=True, type=int)\n parser.add_argument('-r', '--rotation', help=\n 'Global rotation to apply to all beam centers', required=False,\n type=float, default=0)\n parser.add_argument('-re', '--reorder', help=\n 'Reorder tilts by increasing angle rather than by data collection',\n action='store_true')\n parser.add_argument('-t', '--tilt_file', help=\n 'Output path for tilt angles file, ordered as tilt series',\n required=False, type=str)\n parser.add_argument('-e', '--exclude_angles', help=\n 'List of tilt angles to exclude (space-separated)', required=False,\n nargs='+', type=int)\n return vars(parser.parse_args())\n\n\ndef modify_args(args):\n \"\"\"\n Modify command line arguments and add additional information to keys.\n \"\"\"\n if args['voxel_path'] is None:\n args['voxel_path'] = args['stitched_prefix'] + '0.mrc'\n mrc = mrcfile.open(args['voxel_path'])\n args['voxel_size'] = float(mrc.voxel_size.x)\n mrc.close()\n args['params'] = pickle.load(open(args['params'], 'rb'))\n if args['exclude_angles'] is None:\n args['exclude_angles'] = np.array(list())\n return args\n\n\ndef retrieve_beam_centers(centers_file, voxel_size):\n \"\"\"\n Retrieve the position of the central tile for each tilt angle and convert from \n microns to pixels\n \n Parameters\n ----------\n centers_file : string \n filename of predicted beam centers, where x,y coordinates of each \n tile are listed in um on separate lines after the relevant tilt angle\n voxel_size : float \n voxel dimensions in A/pixel\n \n Returns\n -------\n origin_shifts : numpy.ndarray, shape (N, 2) \n global origin shifts applied to each tilt angle\n tilt_angles : numpy.ndarray, shape (N,) \n tilt angles ordered as images were collected\n \"\"\"\n origin_shifts, tilt_angles = list(), list()\n f = open(centers_file, 'r')\n content = f.readlines()\n for line in content:\n as_array = np.array(line.strip().split()).astype(float)\n if len(as_array) == 1:\n tilt_angles.append(as_array[0])\n counter = 0\n elif len(as_array) >= 2 and counter == 0:\n origin_shifts.append(as_array * 10000.0 / voxel_size)\n counter += 1\n origin_shifts = np.array(origin_shifts)[:, :2]\n return origin_shifts, np.array(tilt_angles)\n\n\ndef stack_stitched(args):\n \"\"\"\n Crop and then stack stitched tilt images into a tilt-series, accounting\n for the global offset in the tile positions between tilt angles.\n\n Parameters\n ----------\n args: dictionary \n command line arguments\n\n Returns\n -------\n tilt_series : numpy.ndarray, shape (n_tilts, N, N) \n coarsely-aligned tilt-stack\n tilts : numpy.ndarray, shape (N,) \n tilt angles ordered as images in tilt_series\n 
\"\"\"\n shifts, all_tilts = retrieve_beam_centers(args['centers'], args[\n 'voxel_size'])\n shifts = utils.apply_rotation(shifts, args['rotation'])\n shifts = np.fliplr(shifts)\n t_shifts = OrderedDict((key, val) for key, val in zip(all_tilts, shifts))\n tilts = np.array(list(args['params'].keys()))\n if args['reorder'] is True:\n tilts = np.array(sorted(tilts))\n retained_tilts = np.setdiff1d(tilts, args['exclude_angles'])\n offsets = np.array([t_shifts[xi] for xi in retained_tilts])\n tilt_series = np.zeros((len(retained_tilts), args['width'] * 2, args[\n 'width'] * 2))\n for xi, t in enumerate(retained_tilts):\n image = mrcfile.open(args['stitched_prefix'] + f'{int(t)}.mrc').data\n xc, yc = (np.array(image.shape) / 2).astype(int)\n x_offset, y_offset = np.around(offsets[xi]).astype(int)\n x_offset *= np.cos(np.deg2rad(t))\n x_offset = np.around(x_offset).astype(int)\n tilt_series[xi] = image[xc - args['width'] - x_offset:xc + args[\n 'width'] - x_offset, yc - args['width'] - y_offset:yc + args[\n 'width'] - y_offset]\n return tilt_series, retained_tilts\n\n\ndef main():\n start_time = time.time()\n args = parse_commandline()\n args = modify_args(args)\n tseries, retained_tilts = stack_stitched(args)\n utils.save_mrc(tseries, args['output'], args['voxel_size'])\n if args['tilt_file'] is not None:\n np.savetxt(args['tilt_file'], retained_tilts, fmt='%i', delimiter='\\n')\n print(f'elapsed time is {(time.time() - start_time) / 60.0:.2f} minutes')\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\ndef modify_args(args):\n \"\"\"\n Modify command line arguments and add additional information to keys.\n \"\"\"\n if args['voxel_path'] is None:\n args['voxel_path'] = args['stitched_prefix'] + '0.mrc'\n mrc = mrcfile.open(args['voxel_path'])\n args['voxel_size'] = float(mrc.voxel_size.x)\n mrc.close()\n args['params'] = pickle.load(open(args['params'], 'rb'))\n if args['exclude_angles'] is None:\n args['exclude_angles'] = np.array(list())\n return args\n\n\ndef retrieve_beam_centers(centers_file, voxel_size):\n \"\"\"\n Retrieve the position of the central tile for each tilt angle and convert from \n microns to pixels\n \n Parameters\n ----------\n centers_file : string \n filename of predicted beam centers, where x,y coordinates of each \n tile are listed in um on separate lines after the relevant tilt angle\n voxel_size : float \n voxel dimensions in A/pixel\n \n Returns\n -------\n origin_shifts : numpy.ndarray, shape (N, 2) \n global origin shifts applied to each tilt angle\n tilt_angles : numpy.ndarray, shape (N,) \n tilt angles ordered as images were collected\n \"\"\"\n origin_shifts, tilt_angles = list(), list()\n f = open(centers_file, 'r')\n content = f.readlines()\n for line in content:\n as_array = np.array(line.strip().split()).astype(float)\n if len(as_array) == 1:\n tilt_angles.append(as_array[0])\n counter = 0\n elif len(as_array) >= 2 and counter == 0:\n origin_shifts.append(as_array * 10000.0 / voxel_size)\n counter += 1\n origin_shifts = np.array(origin_shifts)[:, :2]\n return origin_shifts, np.array(tilt_angles)\n\n\ndef stack_stitched(args):\n \"\"\"\n Crop and then stack stitched tilt images into a tilt-series, accounting\n for the global offset in the tile positions between tilt angles.\n\n Parameters\n ----------\n args: dictionary \n command line arguments\n\n Returns\n -------\n tilt_series : numpy.ndarray, shape (n_tilts, N, N) \n coarsely-aligned tilt-stack\n tilts : numpy.ndarray, shape (N,) \n tilt angles ordered as images in tilt_series\n \"\"\"\n shifts, all_tilts = retrieve_beam_centers(args['centers'], args[\n 'voxel_size'])\n shifts = utils.apply_rotation(shifts, args['rotation'])\n shifts = np.fliplr(shifts)\n t_shifts = OrderedDict((key, val) for key, val in zip(all_tilts, shifts))\n tilts = np.array(list(args['params'].keys()))\n if args['reorder'] is True:\n tilts = np.array(sorted(tilts))\n retained_tilts = np.setdiff1d(tilts, args['exclude_angles'])\n offsets = np.array([t_shifts[xi] for xi in retained_tilts])\n tilt_series = np.zeros((len(retained_tilts), args['width'] * 2, args[\n 'width'] * 2))\n for xi, t in enumerate(retained_tilts):\n image = mrcfile.open(args['stitched_prefix'] + f'{int(t)}.mrc').data\n xc, yc = (np.array(image.shape) / 2).astype(int)\n x_offset, y_offset = np.around(offsets[xi]).astype(int)\n x_offset *= np.cos(np.deg2rad(t))\n x_offset = np.around(x_offset).astype(int)\n tilt_series[xi] = image[xc - args['width'] - x_offset:xc + args[\n 'width'] - x_offset, yc - args['width'] - y_offset:yc + args[\n 'width'] - y_offset]\n return tilt_series, retained_tilts\n\n\ndef main():\n start_time = time.time()\n args = parse_commandline()\n args = modify_args(args)\n tseries, retained_tilts = stack_stitched(args)\n utils.save_mrc(tseries, args['output'], args['voxel_size'])\n if args['tilt_file'] is not None:\n np.savetxt(args['tilt_file'], retained_tilts, fmt='%i', delimiter='\\n')\n print(f'elapsed time is {(time.time() - start_time) / 60.0:.2f} minutes')\n\n\n<code 
token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\ndef modify_args(args):\n \"\"\"\n Modify command line arguments and add additional information to keys.\n \"\"\"\n if args['voxel_path'] is None:\n args['voxel_path'] = args['stitched_prefix'] + '0.mrc'\n mrc = mrcfile.open(args['voxel_path'])\n args['voxel_size'] = float(mrc.voxel_size.x)\n mrc.close()\n args['params'] = pickle.load(open(args['params'], 'rb'))\n if args['exclude_angles'] is None:\n args['exclude_angles'] = np.array(list())\n return args\n\n\n<function token>\n\n\ndef stack_stitched(args):\n \"\"\"\n Crop and then stack stitched tilt images into a tilt-series, accounting\n for the global offset in the tile positions between tilt angles.\n\n Parameters\n ----------\n args: dictionary \n command line arguments\n\n Returns\n -------\n tilt_series : numpy.ndarray, shape (n_tilts, N, N) \n coarsely-aligned tilt-stack\n tilts : numpy.ndarray, shape (N,) \n tilt angles ordered as images in tilt_series\n \"\"\"\n shifts, all_tilts = retrieve_beam_centers(args['centers'], args[\n 'voxel_size'])\n shifts = utils.apply_rotation(shifts, args['rotation'])\n shifts = np.fliplr(shifts)\n t_shifts = OrderedDict((key, val) for key, val in zip(all_tilts, shifts))\n tilts = np.array(list(args['params'].keys()))\n if args['reorder'] is True:\n tilts = np.array(sorted(tilts))\n retained_tilts = np.setdiff1d(tilts, args['exclude_angles'])\n offsets = np.array([t_shifts[xi] for xi in retained_tilts])\n tilt_series = np.zeros((len(retained_tilts), args['width'] * 2, args[\n 'width'] * 2))\n for xi, t in enumerate(retained_tilts):\n image = mrcfile.open(args['stitched_prefix'] + f'{int(t)}.mrc').data\n xc, yc = (np.array(image.shape) / 2).astype(int)\n x_offset, y_offset = np.around(offsets[xi]).astype(int)\n x_offset *= np.cos(np.deg2rad(t))\n x_offset = np.around(x_offset).astype(int)\n tilt_series[xi] = image[xc - args['width'] - x_offset:xc + args[\n 'width'] - x_offset, yc - args['width'] - y_offset:yc + args[\n 'width'] - y_offset]\n return tilt_series, retained_tilts\n\n\ndef main():\n start_time = time.time()\n args = parse_commandline()\n args = modify_args(args)\n tseries, retained_tilts = stack_stitched(args)\n utils.save_mrc(tseries, args['output'], args['voxel_size'])\n if args['tilt_file'] is not None:\n np.savetxt(args['tilt_file'], retained_tilts, fmt='%i', delimiter='\\n')\n print(f'elapsed time is {(time.time() - start_time) / 60.0:.2f} minutes')\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\ndef modify_args(args):\n \"\"\"\n Modify command line arguments and add additional information to keys.\n \"\"\"\n if args['voxel_path'] is None:\n args['voxel_path'] = args['stitched_prefix'] + '0.mrc'\n mrc = mrcfile.open(args['voxel_path'])\n args['voxel_size'] = float(mrc.voxel_size.x)\n mrc.close()\n args['params'] = pickle.load(open(args['params'], 'rb'))\n if args['exclude_angles'] is None:\n args['exclude_angles'] = np.array(list())\n return args\n\n\n<function token>\n\n\ndef stack_stitched(args):\n \"\"\"\n Crop and then stack stitched tilt images into a tilt-series, accounting\n for the global offset in the tile positions between tilt angles.\n\n Parameters\n ----------\n args: dictionary \n command line arguments\n\n Returns\n -------\n tilt_series : numpy.ndarray, shape (n_tilts, N, N) \n coarsely-aligned tilt-stack\n tilts : numpy.ndarray, shape (N,) \n tilt angles ordered as images in tilt_series\n \"\"\"\n shifts, all_tilts = retrieve_beam_centers(args['centers'], args[\n 'voxel_size'])\n shifts = utils.apply_rotation(shifts, args['rotation'])\n shifts = np.fliplr(shifts)\n t_shifts = OrderedDict((key, val) for key, val in zip(all_tilts, shifts))\n tilts = np.array(list(args['params'].keys()))\n if args['reorder'] is True:\n tilts = np.array(sorted(tilts))\n retained_tilts = np.setdiff1d(tilts, args['exclude_angles'])\n offsets = np.array([t_shifts[xi] for xi in retained_tilts])\n tilt_series = np.zeros((len(retained_tilts), args['width'] * 2, args[\n 'width'] * 2))\n for xi, t in enumerate(retained_tilts):\n image = mrcfile.open(args['stitched_prefix'] + f'{int(t)}.mrc').data\n xc, yc = (np.array(image.shape) / 2).astype(int)\n x_offset, y_offset = np.around(offsets[xi]).astype(int)\n x_offset *= np.cos(np.deg2rad(t))\n x_offset = np.around(x_offset).astype(int)\n tilt_series[xi] = image[xc - args['width'] - x_offset:xc + args[\n 'width'] - x_offset, yc - args['width'] - y_offset:yc + args[\n 'width'] - y_offset]\n return tilt_series, retained_tilts\n\n\n<function token>\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\ndef modify_args(args):\n \"\"\"\n Modify command line arguments and add additional information to keys.\n \"\"\"\n if args['voxel_path'] is None:\n args['voxel_path'] = args['stitched_prefix'] + '0.mrc'\n mrc = mrcfile.open(args['voxel_path'])\n args['voxel_size'] = float(mrc.voxel_size.x)\n mrc.close()\n args['params'] = pickle.load(open(args['params'], 'rb'))\n if args['exclude_angles'] is None:\n args['exclude_angles'] = np.array(list())\n return args\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
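
The docstrings embedded in the tokenization steps above describe the beam-centers file that retrieve_beam_centers parses: a tilt angle on its own line, followed by x,y tile centers in microns. A minimal sketch of that layout and of the micron-to-pixel conversion the function applies; every numeric value here is invented for illustration.

# Toy beam-centers file: one tilt angle per line, then tile centers in um.
toy_centers = """0.0
1.25 0.80
1.30 0.85
-3.0
1.20 0.78
"""
# Only the first center after each angle is kept, converted to pixels as
# um * 10000 (A per um) / voxel_size (A per pixel), as in the function body.
voxel_size = 2.0                        # A/pixel, assumed for this sketch
print(1.25 * 10000.0 / voxel_size)      # 6250.0 pixels
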
98,431 |
54e61d1ac9132d0ef4a0ffcdc24ca0503b1b1faa
|
from sqlalchemy import Column, ForeignKey, Integer, String, Float, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from flask_login import UserMixin  # flask.ext.* was removed in Flask 1.0
from config import SQLALCHEMY_DATABASE_URI
Base = declarative_base()
class User(UserMixin, Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
social_id = Column(String(64), nullable=False, unique=True)
nickname = Column(String(64), nullable=False)
email = Column(String(64), nullable=True)
class ProdCat(Base):
__tablename__ = 'prod_category'
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
desc = Column(String(250))
owner_id = Column(Integer, ForeignKey('users.id'))
users = relationship(User)
@property
def serialize(self):
return {
'id': self.id,
'name': self.name,
'desc': self.desc,
}
class ProdItem(Base):
__tablename__ = 'prod_item'
prdname = Column(String(80), nullable=False)
id = Column(Integer, primary_key=True)
prd_desc = Column(String(250))
price = Column(Float)
num_in_stock = Column(Integer)
featured = Column(Boolean, default=False)
prdcat_id = Column(Integer, ForeignKey('prod_category.id'))
prod_category = relationship(ProdCat)
def __repr__(self):
r = '<Product {:d} {} {}>'
return r.format(self.id, self.prd_desc, self.price)
@property
def serialize(self):
return {
'name': self.prdname,
'description': self.prd_desc,
'id': self.id,
'price': self.price,
'Instock': self.num_in_stock,
'Featured': self.featured,
}
engine = create_engine(SQLALCHEMY_DATABASE_URI)
Base.metadata.create_all(engine)
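
A minimal usage sketch for the models above, assuming SQLALCHEMY_DATABASE_URI points at a reachable database; the category and product values are invented sample data.

from sqlalchemy.orm import sessionmaker

# Bind a session factory to the engine created above.
Session = sessionmaker(bind=engine)
session = Session()

# Invented rows; owner_id is left NULL for brevity.
toys = ProdCat(name='Toys', desc='Assorted toys')
session.add(toys)
session.commit()

ball = ProdItem(prdname='Ball', prd_desc='Red rubber ball', price=9.99,
                num_in_stock=12, prdcat_id=toys.id)
session.add(ball)
session.commit()

print(ball.serialize)  # dict produced by the @property defined above
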
|
[
"from sqlalchemy import Column, ForeignKey, Integer, String, Float, Boolean\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy.orm import relationship\r\nfrom sqlalchemy import create_engine\r\nfrom flask.ext.login import UserMixin\r\nfrom config import SQLALCHEMY_DATABASE_URI\r\n\r\nBase = declarative_base()\r\n\r\n\r\nclass User(UserMixin, Base):\r\n __tablename__ = 'users'\r\n id = Column(Integer, primary_key=True)\r\n social_id = Column(String(64), nullable=False, unique=True)\r\n nickname = Column(String(64), nullable=False)\r\n email = Column(String(64), nullable=True)\r\n\r\n\r\nclass ProdCat(Base):\r\n __tablename__ = 'prod_category'\r\n\r\n id = Column(Integer, primary_key=True)\r\n name = Column(String(250), nullable=False)\r\n desc = Column(String(250))\r\n owner_id = Column(Integer, ForeignKey('users.id'))\r\n users = relationship(User)\r\n\r\n @property\r\n def serialize(self):\r\n\r\n return {\r\n 'id': self.id,\r\n 'name': self.name,\r\n 'desc': self.desc,\r\n }\r\n\r\n\r\nclass ProdItem(Base):\r\n __tablename__ = 'prod_item'\r\n\r\n prdname = Column(String(80), nullable=False)\r\n id = Column(Integer, primary_key=True)\r\n prd_desc = Column(String(250))\r\n price = Column(Float)\r\n num_in_stock = Column(Integer)\r\n featured = Column(Boolean, default=False)\r\n prdcat_id = Column(Integer, ForeignKey('prod_category.id'))\r\n prod_category = relationship(ProdCat)\r\n\r\n def __repr__(self):\r\n r = '<Product {:d} {} {}>'\r\n return r.format(self.id, self.prd_desc, self.price)\r\n\r\n @property\r\n def serialize(self):\r\n\r\n return {\r\n 'name': self.prdname,\r\n 'description': self.prd_desc,\r\n 'id': self.id,\r\n 'price': self.price,\r\n 'Instock': self.num_in_stock,\r\n 'Featured': self.featured,\r\n }\r\n\r\n\r\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\r\nBase.metadata.create_all(engine)\r\n",
"from sqlalchemy import Column, ForeignKey, Integer, String, Float, Boolean\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import create_engine\nfrom flask.ext.login import UserMixin\nfrom config import SQLALCHEMY_DATABASE_URI\nBase = declarative_base()\n\n\nclass User(UserMixin, Base):\n __tablename__ = 'users'\n id = Column(Integer, primary_key=True)\n social_id = Column(String(64), nullable=False, unique=True)\n nickname = Column(String(64), nullable=False)\n email = Column(String(64), nullable=True)\n\n\nclass ProdCat(Base):\n __tablename__ = 'prod_category'\n id = Column(Integer, primary_key=True)\n name = Column(String(250), nullable=False)\n desc = Column(String(250))\n owner_id = Column(Integer, ForeignKey('users.id'))\n users = relationship(User)\n\n @property\n def serialize(self):\n return {'id': self.id, 'name': self.name, 'desc': self.desc}\n\n\nclass ProdItem(Base):\n __tablename__ = 'prod_item'\n prdname = Column(String(80), nullable=False)\n id = Column(Integer, primary_key=True)\n prd_desc = Column(String(250))\n price = Column(Float)\n num_in_stock = Column(Integer)\n featured = Column(Boolean, default=False)\n prdcat_id = Column(Integer, ForeignKey('prod_category.id'))\n prod_category = relationship(ProdCat)\n\n def __repr__(self):\n r = '<Product {:d} {} {}>'\n return r.format(self.id, self.prd_desc, self.price)\n\n @property\n def serialize(self):\n return {'name': self.prdname, 'description': self.prd_desc, 'id':\n self.id, 'price': self.price, 'Instock': self.num_in_stock,\n 'Featured': self.featured}\n\n\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\nBase.metadata.create_all(engine)\n",
"<import token>\nBase = declarative_base()\n\n\nclass User(UserMixin, Base):\n __tablename__ = 'users'\n id = Column(Integer, primary_key=True)\n social_id = Column(String(64), nullable=False, unique=True)\n nickname = Column(String(64), nullable=False)\n email = Column(String(64), nullable=True)\n\n\nclass ProdCat(Base):\n __tablename__ = 'prod_category'\n id = Column(Integer, primary_key=True)\n name = Column(String(250), nullable=False)\n desc = Column(String(250))\n owner_id = Column(Integer, ForeignKey('users.id'))\n users = relationship(User)\n\n @property\n def serialize(self):\n return {'id': self.id, 'name': self.name, 'desc': self.desc}\n\n\nclass ProdItem(Base):\n __tablename__ = 'prod_item'\n prdname = Column(String(80), nullable=False)\n id = Column(Integer, primary_key=True)\n prd_desc = Column(String(250))\n price = Column(Float)\n num_in_stock = Column(Integer)\n featured = Column(Boolean, default=False)\n prdcat_id = Column(Integer, ForeignKey('prod_category.id'))\n prod_category = relationship(ProdCat)\n\n def __repr__(self):\n r = '<Product {:d} {} {}>'\n return r.format(self.id, self.prd_desc, self.price)\n\n @property\n def serialize(self):\n return {'name': self.prdname, 'description': self.prd_desc, 'id':\n self.id, 'price': self.price, 'Instock': self.num_in_stock,\n 'Featured': self.featured}\n\n\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\nBase.metadata.create_all(engine)\n",
"<import token>\n<assignment token>\n\n\nclass User(UserMixin, Base):\n __tablename__ = 'users'\n id = Column(Integer, primary_key=True)\n social_id = Column(String(64), nullable=False, unique=True)\n nickname = Column(String(64), nullable=False)\n email = Column(String(64), nullable=True)\n\n\nclass ProdCat(Base):\n __tablename__ = 'prod_category'\n id = Column(Integer, primary_key=True)\n name = Column(String(250), nullable=False)\n desc = Column(String(250))\n owner_id = Column(Integer, ForeignKey('users.id'))\n users = relationship(User)\n\n @property\n def serialize(self):\n return {'id': self.id, 'name': self.name, 'desc': self.desc}\n\n\nclass ProdItem(Base):\n __tablename__ = 'prod_item'\n prdname = Column(String(80), nullable=False)\n id = Column(Integer, primary_key=True)\n prd_desc = Column(String(250))\n price = Column(Float)\n num_in_stock = Column(Integer)\n featured = Column(Boolean, default=False)\n prdcat_id = Column(Integer, ForeignKey('prod_category.id'))\n prod_category = relationship(ProdCat)\n\n def __repr__(self):\n r = '<Product {:d} {} {}>'\n return r.format(self.id, self.prd_desc, self.price)\n\n @property\n def serialize(self):\n return {'name': self.prdname, 'description': self.prd_desc, 'id':\n self.id, 'price': self.price, 'Instock': self.num_in_stock,\n 'Featured': self.featured}\n\n\n<assignment token>\nBase.metadata.create_all(engine)\n",
"<import token>\n<assignment token>\n\n\nclass User(UserMixin, Base):\n __tablename__ = 'users'\n id = Column(Integer, primary_key=True)\n social_id = Column(String(64), nullable=False, unique=True)\n nickname = Column(String(64), nullable=False)\n email = Column(String(64), nullable=True)\n\n\nclass ProdCat(Base):\n __tablename__ = 'prod_category'\n id = Column(Integer, primary_key=True)\n name = Column(String(250), nullable=False)\n desc = Column(String(250))\n owner_id = Column(Integer, ForeignKey('users.id'))\n users = relationship(User)\n\n @property\n def serialize(self):\n return {'id': self.id, 'name': self.name, 'desc': self.desc}\n\n\nclass ProdItem(Base):\n __tablename__ = 'prod_item'\n prdname = Column(String(80), nullable=False)\n id = Column(Integer, primary_key=True)\n prd_desc = Column(String(250))\n price = Column(Float)\n num_in_stock = Column(Integer)\n featured = Column(Boolean, default=False)\n prdcat_id = Column(Integer, ForeignKey('prod_category.id'))\n prod_category = relationship(ProdCat)\n\n def __repr__(self):\n r = '<Product {:d} {} {}>'\n return r.format(self.id, self.prd_desc, self.price)\n\n @property\n def serialize(self):\n return {'name': self.prdname, 'description': self.prd_desc, 'id':\n self.id, 'price': self.price, 'Instock': self.num_in_stock,\n 'Featured': self.featured}\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass User(UserMixin, Base):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass ProdCat(Base):\n __tablename__ = 'prod_category'\n id = Column(Integer, primary_key=True)\n name = Column(String(250), nullable=False)\n desc = Column(String(250))\n owner_id = Column(Integer, ForeignKey('users.id'))\n users = relationship(User)\n\n @property\n def serialize(self):\n return {'id': self.id, 'name': self.name, 'desc': self.desc}\n\n\nclass ProdItem(Base):\n __tablename__ = 'prod_item'\n prdname = Column(String(80), nullable=False)\n id = Column(Integer, primary_key=True)\n prd_desc = Column(String(250))\n price = Column(Float)\n num_in_stock = Column(Integer)\n featured = Column(Boolean, default=False)\n prdcat_id = Column(Integer, ForeignKey('prod_category.id'))\n prod_category = relationship(ProdCat)\n\n def __repr__(self):\n r = '<Product {:d} {} {}>'\n return r.format(self.id, self.prd_desc, self.price)\n\n @property\n def serialize(self):\n return {'name': self.prdname, 'description': self.prd_desc, 'id':\n self.id, 'price': self.price, 'Instock': self.num_in_stock,\n 'Featured': self.featured}\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass ProdCat(Base):\n __tablename__ = 'prod_category'\n id = Column(Integer, primary_key=True)\n name = Column(String(250), nullable=False)\n desc = Column(String(250))\n owner_id = Column(Integer, ForeignKey('users.id'))\n users = relationship(User)\n\n @property\n def serialize(self):\n return {'id': self.id, 'name': self.name, 'desc': self.desc}\n\n\nclass ProdItem(Base):\n __tablename__ = 'prod_item'\n prdname = Column(String(80), nullable=False)\n id = Column(Integer, primary_key=True)\n prd_desc = Column(String(250))\n price = Column(Float)\n num_in_stock = Column(Integer)\n featured = Column(Boolean, default=False)\n prdcat_id = Column(Integer, ForeignKey('prod_category.id'))\n prod_category = relationship(ProdCat)\n\n def __repr__(self):\n r = '<Product {:d} {} {}>'\n return r.format(self.id, self.prd_desc, self.price)\n\n @property\n def serialize(self):\n return {'name': self.prdname, 'description': self.prd_desc, 'id':\n self.id, 'price': self.price, 'Instock': self.num_in_stock,\n 'Featured': self.featured}\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass ProdCat(Base):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n @property\n def serialize(self):\n return {'id': self.id, 'name': self.name, 'desc': self.desc}\n\n\nclass ProdItem(Base):\n __tablename__ = 'prod_item'\n prdname = Column(String(80), nullable=False)\n id = Column(Integer, primary_key=True)\n prd_desc = Column(String(250))\n price = Column(Float)\n num_in_stock = Column(Integer)\n featured = Column(Boolean, default=False)\n prdcat_id = Column(Integer, ForeignKey('prod_category.id'))\n prod_category = relationship(ProdCat)\n\n def __repr__(self):\n r = '<Product {:d} {} {}>'\n return r.format(self.id, self.prd_desc, self.price)\n\n @property\n def serialize(self):\n return {'name': self.prdname, 'description': self.prd_desc, 'id':\n self.id, 'price': self.price, 'Instock': self.num_in_stock,\n 'Featured': self.featured}\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass ProdCat(Base):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass ProdItem(Base):\n __tablename__ = 'prod_item'\n prdname = Column(String(80), nullable=False)\n id = Column(Integer, primary_key=True)\n prd_desc = Column(String(250))\n price = Column(Float)\n num_in_stock = Column(Integer)\n featured = Column(Boolean, default=False)\n prdcat_id = Column(Integer, ForeignKey('prod_category.id'))\n prod_category = relationship(ProdCat)\n\n def __repr__(self):\n r = '<Product {:d} {} {}>'\n return r.format(self.id, self.prd_desc, self.price)\n\n @property\n def serialize(self):\n return {'name': self.prdname, 'description': self.prd_desc, 'id':\n self.id, 'price': self.price, 'Instock': self.num_in_stock,\n 'Featured': self.featured}\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ProdItem(Base):\n __tablename__ = 'prod_item'\n prdname = Column(String(80), nullable=False)\n id = Column(Integer, primary_key=True)\n prd_desc = Column(String(250))\n price = Column(Float)\n num_in_stock = Column(Integer)\n featured = Column(Boolean, default=False)\n prdcat_id = Column(Integer, ForeignKey('prod_category.id'))\n prod_category = relationship(ProdCat)\n\n def __repr__(self):\n r = '<Product {:d} {} {}>'\n return r.format(self.id, self.prd_desc, self.price)\n\n @property\n def serialize(self):\n return {'name': self.prdname, 'description': self.prd_desc, 'id':\n self.id, 'price': self.price, 'Instock': self.num_in_stock,\n 'Featured': self.featured}\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ProdItem(Base):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __repr__(self):\n r = '<Product {:d} {} {}>'\n return r.format(self.id, self.prd_desc, self.price)\n\n @property\n def serialize(self):\n return {'name': self.prdname, 'description': self.prd_desc, 'id':\n self.id, 'price': self.price, 'Instock': self.num_in_stock,\n 'Featured': self.featured}\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ProdItem(Base):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n @property\n def serialize(self):\n return {'name': self.prdname, 'description': self.prd_desc, 'id':\n self.id, 'price': self.price, 'Instock': self.num_in_stock,\n 'Featured': self.featured}\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ProdItem(Base):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n"
] | false |
98,432 |
9ce9867542834c3f3186147a2439f0fc4eb05e04
|
import turtle
counter=0
distance = 10
angle = 10
while counter<1:
distance=int(input("Please enter a distance: "))
angle=int(input("Please enter an angle: "))
turtle.forward(distance)
turtle.setheading(angle)
if distance==0:
counter=counter+1
print('What an amazing drawing!!!!')
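
A non-interactive variant of the same loop, useful for testing the drawing logic without typing input; the (distance, angle) pairs below are made up.

import turtle

moves = [(50, 90), (50, 180), (50, 270), (0, 0)]  # distance 0 ends the walk
for distance, angle in moves:
    if distance == 0:
        break
    turtle.forward(distance)
    turtle.setheading(angle)
print('What an amazing drawing!!!!')
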
|
[
"import turtle\r\ncounter=0\r\ndistance = 10\r\nangle = 10\r\nwhile counter<1:\r\n distance=int(input(\"Please enter a distance: \"))\r\n angle=int(input(\"Please enter an angle: \"))\r\n turtle.forward(distance)\r\n turtle.setheading(angle)\r\n if distance==0:\r\n counter=counter+1\r\n\r\nprint('What an amazing drawing!!!!')\r\n",
"import turtle\ncounter = 0\ndistance = 10\nangle = 10\nwhile counter < 1:\n distance = int(input('Please enter a distance: '))\n angle = int(input('Please enter an angle: '))\n turtle.forward(distance)\n turtle.setheading(angle)\n if distance == 0:\n counter = counter + 1\nprint('What an amazing drawing!!!!')\n",
"<import token>\ncounter = 0\ndistance = 10\nangle = 10\nwhile counter < 1:\n distance = int(input('Please enter a distance: '))\n angle = int(input('Please enter an angle: '))\n turtle.forward(distance)\n turtle.setheading(angle)\n if distance == 0:\n counter = counter + 1\nprint('What an amazing drawing!!!!')\n",
"<import token>\n<assignment token>\nwhile counter < 1:\n distance = int(input('Please enter a distance: '))\n angle = int(input('Please enter an angle: '))\n turtle.forward(distance)\n turtle.setheading(angle)\n if distance == 0:\n counter = counter + 1\nprint('What an amazing drawing!!!!')\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,433 |
6c4bf7a8adccf8d49c0da43c808f83ece4d7e487
|
import boto3
access_key = "TODO : Add your access key"
access_secret = "TODO : Add your secret"
region = "us-west-2"
client = boto3.client('rekognition', aws_access_key_id=access_key, aws_secret_access_key=access_secret, region_name=region)
client.stop_project_version(ProjectVersionArn='TODO : Model Arn')
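
stop_project_version only requests the stop; a follow-up poll confirms the model actually reached STOPPED (and billing ended). A sketch assuming the project ARN and version name are known; both are placeholders here, like the model ARN above.

import time

project_arn = 'TODO : Project Arn'      # placeholder
version_name = 'TODO : Version name'    # placeholder

while True:
    response = client.describe_project_versions(ProjectArn=project_arn,
                                                VersionNames=[version_name])
    status = response['ProjectVersionDescriptions'][0]['Status']
    print(status)
    if status == 'STOPPED':
        break
    time.sleep(10)
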
|
[
"import boto3\n\naccess_key = \"TODO : Add your access key\"\naccess_secret = \"TODO : Add your secret\"\nregion = \"us-west-2\"\n\nclient = boto3.client('rekognition', aws_access_key_id=access_key, aws_secret_access_key=access_secret, region_name=region)\nclient.stop_project_version(ProjectVersionArn='TODO : Model Arn')\n\n\n",
"import boto3\naccess_key = 'TODO : Add your access key'\naccess_secret = 'TODO : Add your secret'\nregion = 'us-west-2'\nclient = boto3.client('rekognition', aws_access_key_id=access_key,\n aws_secret_access_key=access_secret, region_name=region)\nclient.stop_project_version(ProjectVersionArn='TODO : Model Arn')\n",
"<import token>\naccess_key = 'TODO : Add your access key'\naccess_secret = 'TODO : Add your secret'\nregion = 'us-west-2'\nclient = boto3.client('rekognition', aws_access_key_id=access_key,\n aws_secret_access_key=access_secret, region_name=region)\nclient.stop_project_version(ProjectVersionArn='TODO : Model Arn')\n",
"<import token>\n<assignment token>\nclient.stop_project_version(ProjectVersionArn='TODO : Model Arn')\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,434 |
c3a02b70f47dda96229151b6196d6d3f3ed7e431
|
import os
import sys
sys.path.append(os.getcwd())  # make bestquiz importable before importing it
import pytest
from bestquiz import bestQuiz
@pytest.mark.parametrize('a, check', [
([ [ 88, 80, 91 ],
[ 68, 100, -1 ]],2),
([ [ 80, 91, 82 ],
[ -1, -1, 100 ]],1),
([ [ 88, 89, 90 ],
[ -1, -1, -1 ]],2),
([ [ 80, 88, 80],
[ 100, 68, 100]],0),
([ [-1, -1, -1 ],
[-1, -1, -1 ]],None),
])
def test_bestquiz(a, check):
assert bestQuiz(a) == check
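
The graded bestquiz module itself is not shown; a reference sketch consistent with the parametrized cases above: each column of a is one quiz, -1 marks a missing score, the quiz with the highest average over non-missing scores wins, ties keep the earliest index, and all-missing input returns None.

def bestQuiz(a):
    best_index, best_avg = None, None
    for quiz in range(len(a[0])):
        scores = [row[quiz] for row in a if row[quiz] != -1]
        if not scores:
            continue  # a quiz nobody took cannot win
        avg = sum(scores) / len(scores)
        if best_avg is None or avg > best_avg:
            best_index, best_avg = quiz, avg
    return best_index
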
|
[
"import pytest\nfrom bestquiz import bestQuiz\nimport os\nimport sys\nsys.path.append(os.getcwd())\n\n\[email protected]('a, check', [\n\n\t([ [ 88, 80, 91 ],\n\t\t[ 68, 100, -1 ]],2),\n\t([ [ 80, 91, 82 ],\n\t\t[ -1, -1, 100 ]],1),\n\t([ [ 88, 89, 90 ],\n\t\t[ -1, -1, -1 ]],2),\n\t([ [ 80, 88, 80],\n\t\t[ 100, 68, 100]],0),\n\t([ [-1, -1, -1 ],\n\t\t[-1, -1, -1 ]],None),\n\n\t])\ndef test_bestquiz(a, check):\n\tassert bestQuiz(a) == check\n\n",
"import pytest\nfrom bestquiz import bestQuiz\nimport os\nimport sys\nsys.path.append(os.getcwd())\n\n\[email protected]('a, check', [([[88, 80, 91], [68, 100, -1]], 2), (\n [[80, 91, 82], [-1, -1, 100]], 1), ([[88, 89, 90], [-1, -1, -1]], 2), (\n [[80, 88, 80], [100, 68, 100]], 0), ([[-1, -1, -1], [-1, -1, -1]], None)])\ndef test_bestquiz(a, check):\n assert bestQuiz(a) == check\n",
"<import token>\nsys.path.append(os.getcwd())\n\n\[email protected]('a, check', [([[88, 80, 91], [68, 100, -1]], 2), (\n [[80, 91, 82], [-1, -1, 100]], 1), ([[88, 89, 90], [-1, -1, -1]], 2), (\n [[80, 88, 80], [100, 68, 100]], 0), ([[-1, -1, -1], [-1, -1, -1]], None)])\ndef test_bestquiz(a, check):\n assert bestQuiz(a) == check\n",
"<import token>\n<code token>\n\n\[email protected]('a, check', [([[88, 80, 91], [68, 100, -1]], 2), (\n [[80, 91, 82], [-1, -1, 100]], 1), ([[88, 89, 90], [-1, -1, -1]], 2), (\n [[80, 88, 80], [100, 68, 100]], 0), ([[-1, -1, -1], [-1, -1, -1]], None)])\ndef test_bestquiz(a, check):\n assert bestQuiz(a) == check\n",
"<import token>\n<code token>\n<function token>\n"
] | false |
98,435 |
72fc4d88dffc2c354c4632ee15bf4fa494f464ab
|
def sortedWithCmp(values, cmpFunc):
    # cmpFunc(a, b): takes two arguments. Returns True if a > b, else False.
    # sortedWithCmp uses cmpFunc to compare the elements by threading it
    # through to merge, where the actual comparisons happen.
    if len(values) < 2:
        return values
    else:
        return merge(sortedWithCmp(values[0:len(values)//2], cmpFunc),
                     sortedWithCmp(values[len(values)//2:], cmpFunc),
                     cmpFunc)

def merge(left, right, cmpFunc):
    leftIndex, rightIndex = 0, 0
    merged = []
    while leftIndex < len(left) and rightIndex < len(right):
        # Take from the left while its element does not sort after the
        # right one; preferring left on ties keeps the sort stable.
        if not cmpFunc(left[leftIndex], right[rightIndex]):
            merged.append(left[leftIndex])
            leftIndex += 1
        else:
            merged.append(right[rightIndex])
            rightIndex += 1
    merged += left[leftIndex:] + right[rightIndex:]
    return merged
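
A quick usage sketch with an invented comparator. cmpFunc(a, b) returns True when a should sort after b, so sorting words longest-first means "a sorts later when it is shorter".

by_length_desc = lambda a, b: len(a) < len(b)
words = ['fig', 'banana', 'apple', 'kiwi']
print(sortedWithCmp(words, by_length_desc))  # ['banana', 'apple', 'kiwi', 'fig']
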
|
[
"def sortedWithCmp(values,cmpFunc):\n # cmpFunc(a,b): take two arguments. Returns True is a > b else False\n # Modify sortedWithCmp so that it uses cmpFunc to compare the elements.\n # You will need to use cmpFunc in merge. \n if len(values) < 2: \n return values\n else:\n return merge(sortedWithCmp(values[0:len(values)//2],cmpFunc), sortedWithCmp(values[len(values)//2:],cmpFunc))\n \ndef merge(left,right):\n leftIndex,rightIndex = 0,0\n merged = []\n while leftIndex < len(left) and rightIndex < len(right):\n if left[leftIndex] < right[rightIndex]:\n merged.append(left[leftIndex])\n leftIndex += 1\n else:\n merged.append(right[rightIndex])\n rightIndex += 1\n merged += left[leftIndex:]+right[rightIndex:]\n return merged",
"def sortedWithCmp(values, cmpFunc):\n if len(values) < 2:\n return values\n else:\n return merge(sortedWithCmp(values[0:len(values) // 2], cmpFunc),\n sortedWithCmp(values[len(values) // 2:], cmpFunc))\n\n\ndef merge(left, right):\n leftIndex, rightIndex = 0, 0\n merged = []\n while leftIndex < len(left) and rightIndex < len(right):\n if left[leftIndex] < right[rightIndex]:\n merged.append(left[leftIndex])\n leftIndex += 1\n else:\n merged.append(right[rightIndex])\n rightIndex += 1\n merged += left[leftIndex:] + right[rightIndex:]\n return merged\n",
"def sortedWithCmp(values, cmpFunc):\n if len(values) < 2:\n return values\n else:\n return merge(sortedWithCmp(values[0:len(values) // 2], cmpFunc),\n sortedWithCmp(values[len(values) // 2:], cmpFunc))\n\n\n<function token>\n",
"<function token>\n<function token>\n"
] | false |
98,436 |
9526c585a64a42788788e0f2c7de00418642cbca
|
from django.shortcuts import render, redirect
from core.forms import FormAluno, FormTurma, FormMateria, FormResultado
from core.models import Aluno, Turma, Materia, Resultado
from django.urls import reverse_lazy
from django.views import generic
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
# Create your views here.
def home(request):
return render(request, 'core/index.html')
class Registrar(generic.CreateView):
form_class = UserCreationForm
success_url = reverse_lazy('login')
template_name = 'registration/register.html'
@login_required
def cadastro_aluno(request):
if request.user.is_staff:
        form = FormAluno(request.POST or None, request.FILES or None)
if form.is_valid():
form.save()
messages.success(request, "Aluno cadastrado com sucesso!")
return redirect('url_listagem_alunos')
contexto = {'form' : form,
'texto_title': 'CadAlu',
'texto_titulo':'Cadastro Aluno',
'texto_botao':'Cadastrar',
'url_voltar':'url_principal'
}
return render(request, 'core/cadastro.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
@login_required
def listagem_alunos(request):
try:
if request.user.is_staff:
if request.POST and request.POST['aluno_input']:
dados = Aluno.objects.filter(nome=request.POST['aluno_input'])
else:
dados = Aluno.objects.all()
contexto = {'dados': dados}
return render(request, 'core/listagem_alunos.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
@login_required
def exclui_aluno(request, id):
try:
if request.user.is_staff:
obj = Aluno.objects.get(id=id)
if request.POST:
obj.delete()
messages.success(request, "Aluno excluido com sucesso!")
return redirect('url_listagem_alunos')
else:
contexto = {'dados': obj.nome, 'id': obj.id, 'url': 'url_listagem_alunos'}
return render(request, 'core/confirma_exclusao.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
@login_required
def atualiza_aluno(request, id):
try:
if request.user.is_staff:
obj = Aluno.objects.get(id=id)
form = FormAluno(request.POST or None, request.FILES or None, instance=obj)
if form.is_valid():
form.save()
messages.success(request, "Aluno atualizado com sucesso!")
return redirect('url_listagem_alunos')
else:
contexto = {
'form': form,
'texto_title': 'AtuAlu',
'texto_titulo':'Atualização Aluno',
'texto_botao':'Atualizar',
'url_voltar':'url_listagem_alunos'
}
return render(request, 'core/cadastro.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
@login_required
def cadastro_turma(request):
if request.user.is_staff:
form = FormTurma(request.POST or None)
if form.is_valid():
form.save()
messages.success(request, "Turma cadastrada com sucesso!")
return redirect('url_listagem_turmas')
contexto = {'form' : form,
'texto_title': 'CadTurm',
'texto_titulo':'Cadastro Turmas',
'texto_botao':'Cadastrar',
'url_voltar':'url_principal'
}
return render(request, 'core/cadastro.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
@login_required
def listagem_turmas(request):
try:
if request.user.is_staff:
if request.POST and request.POST['turma_input']:
dados = Turma.objects.filter(serie=request.POST['turma_input'])
else:
dados = Turma.objects.all()
contexto = {'dados': dados}
return render(request, 'core/listagem_turmas.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
@login_required
def exclui_turma(request, id):
try:
if request.user.is_staff:
obj = Turma.objects.get(id=id)
if request.POST:
obj.delete()
messages.success(request, "Turma excluida com sucesso!")
return redirect('url_listagem_turmas')
else:
contexto = {'dados': obj.serie, 'id': obj.id, 'url': 'url_listagem_turmas'}
return render(request, 'core/confirma_exclusao.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
@login_required
def atualiza_turma(request, id):
try:
if request.user.is_staff:
obj = Turma.objects.get(id=id)
form = FormTurma(request.POST or None, request.FILES or None, instance=obj)
if form.is_valid():
form.save()
messages.success(request, "Turma atualizada com sucesso!")
return redirect('url_listagem_turmas')
else:
contexto = {
'form': form,
'texto_title': 'AtuTurm',
'texto_titulo':'Atualização Turma',
'texto_botao':'Atualizar',
'url_voltar':'url_listagem_turmas'
}
return render(request, 'core/cadastro.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
@login_required
def cadastro_materia(request):
if request.user.is_staff:
form = FormMateria(request.POST or None)
if form.is_valid():
form.save()
messages.success(request, "Matéria cadastrada com sucesso!")
return redirect('url_listagem_materias')
contexto = {'form' : form,
'texto_title': 'CadMateria',
'texto_titulo':'Cadastro Materias',
'texto_botao':'Cadastrar',
'url_voltar':'url_principal'
}
return render(request, 'core/cadastro.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
@login_required
def listagem_materias(request):
try:
if request.user.is_staff:
if request.POST and request.POST['materia_input']:
dados = Materia.objects.filter(nome=request.POST['materia_input'])
else:
dados = Materia.objects.all()
contexto = {'dados': dados}
return render(request, 'core/listagem_materias.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
@login_required
def exclui_materia(request, id):
try:
if request.user.is_staff:
obj = Materia.objects.get(id=id)
if request.POST:
obj.delete()
messages.success(request, "Matéria excluida com sucesso!")
return redirect('url_listagem_materias')
else:
contexto = {'dados': obj.nome, 'id': obj.id, 'url': 'url_listagem_materias'}
return render(request, 'core/confirma_exclusao.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
@login_required
def atualiza_materia(request, id):
try:
if request.user.is_staff:
obj = Materia.objects.get(id=id)
form = FormMateria(request.POST or None, request.FILES or None, instance=obj)
if form.is_valid():
form.save()
messages.success(request, "Matéria atualizada com sucesso!")
return redirect('url_listagem_materias')
else:
contexto = {
'form': form,
'texto_title': 'AtuMateria',
'texto_titulo':'Atualização Matéria',
'texto_botao':'Atualizar',
'url_voltar':'url_listagem_materias'
}
return render(request, 'core/cadastro.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
@login_required
def cadastro_resultado(request):
if request.user.is_staff:
form = FormResultado(request.POST or None)
if form.is_valid():
form.save()
messages.success(request, "Resultado cadastrado com sucesso!")
return redirect('url_listagem_resultados')
contexto = {'form' : form,
'texto_title': 'CadRes',
'texto_titulo':'Cadastro Resultados',
'texto_botao':'Cadastrar',
'url_voltar':'url_principal'
}
return render(request, 'core/cadastro.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
@login_required
def listagem_resultados(request):
try:
if request.user.is_staff:
if request.POST and request.POST['resultado_input']:
dados = Resultado.objects.filter(idAluno__nome=request.POST['resultado_input'])
else:
dados = Resultado.objects.all()
contexto = {'dados': dados}
return render(request, 'core/listagem_resultados.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
@login_required
def exclui_resultado(request, id):
try:
if request.user.is_staff:
obj = Resultado.objects.get(id=id)
if request.POST:
obj.delete()
messages.success(request, "Resultado excluido com sucesso!")
return redirect('url_listagem_resultados')
else:
                contexto = {'dados': obj.idAluno, 'id': obj.id, 'url': 'url_listagem_resultados'}
return render(request, 'core/confirma_exclusao.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
@login_required
def atualiza_resultado(request, id):
try:
if request.user.is_staff:
obj = Resultado.objects.get(id=id)
form = FormResultado(request.POST or None, request.FILES or None, instance=obj)
if form.is_valid():
form.save()
messages.success(request, "Resultado atualizado com sucesso!")
return redirect('url_listagem_resultados')
else:
contexto = {
'form': form,
'texto_title': 'AtuRes',
'texto_titulo':'Atualização Resultados',
'texto_botao':'Atualizar',
'url_voltar':'url_listagem_resultados'
}
return render(request, 'core/cadastro.html', contexto)
else:
return render(request, 'core/NaoAutorizado.html')
except Exception as erro:
return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})
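
The redirects above assume a URLconf exposing names such as 'url_principal' and 'url_listagem_alunos'. A sketch of what that could look like; only the name strings come from the views, while the path patterns and the cadastro/atualiza/exclui names are assumptions.

from django.urls import path
from core import views

urlpatterns = [
    path('', views.home, name='url_principal'),
    path('alunos/', views.listagem_alunos, name='url_listagem_alunos'),
    path('alunos/novo/', views.cadastro_aluno, name='url_cadastro_aluno'),
    path('alunos/<int:id>/editar/', views.atualiza_aluno, name='url_atualiza_aluno'),
    path('alunos/<int:id>/excluir/', views.exclui_aluno, name='url_exclui_aluno'),
    # ...analogous entries for turmas, materias and resultados
]
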
|
[
"from django.shortcuts import render, redirect\nfrom core.forms import FormAluno, FormTurma, FormMateria, FormResultado\nfrom core.models import Aluno, Turma, Materia, Resultado\nfrom django.urls import reverse_lazy\nfrom django.views import generic\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\n# Create your views here.\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, \"Aluno cadastrado com sucesso!\")\n return redirect('url_listagem_alunos')\n contexto = {'form' : form,\n 'texto_title': 'CadAlu',\n 'texto_titulo':'Cadastro Aluno',\n 'texto_botao':'Cadastrar',\n 'url_voltar':'url_principal'\n }\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_alunos(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['aluno_input']:\n dados = Aluno.objects.filter(nome=request.POST['aluno_input'])\n else:\n dados = Aluno.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_alunos.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, \"Aluno excluido com sucesso!\")\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url': 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None, instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, \"Aluno atualizado com sucesso!\")\n return redirect('url_listagem_alunos')\n else:\n contexto = {\n 'form': form,\n 'texto_title': 'AtuAlu',\n 'texto_titulo':'Atualização Aluno',\n 'texto_botao':'Atualizar',\n 'url_voltar':'url_listagem_alunos'\n }\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_turma(request):\n if request.user.is_staff:\n form = FormTurma(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, \"Turma cadastrada com sucesso!\")\n return redirect('url_listagem_turmas')\n contexto = {'form' : form,\n 'texto_title': 'CadTurm',\n 'texto_titulo':'Cadastro Turmas',\n 'texto_botao':'Cadastrar',\n 'url_voltar':'url_principal'\n }\n return render(request, 'core/cadastro.html', 
contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_turma(request, id):\n try:\n if request.user.is_staff:\n obj = Turma.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, \"Turma excluida com sucesso!\")\n return redirect('url_listagem_turmas')\n else:\n contexto = {'dados': obj.serie, 'id': obj.id, 'url': 'url_listagem_turmas'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_turma(request, id):\n try:\n if request.user.is_staff:\n obj = Turma.objects.get(id=id)\n form = FormTurma(request.POST or None, request.FILES or None, instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, \"Turma atualizada com sucesso!\")\n return redirect('url_listagem_turmas')\n else:\n contexto = {\n 'form': form,\n 'texto_title': 'AtuTurm',\n 'texto_titulo':'Atualização Turma',\n 'texto_botao':'Atualizar',\n 'url_voltar':'url_listagem_turmas'\n }\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_materia(request):\n if request.user.is_staff:\n form = FormMateria(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, \"Matéria cadastrada com sucesso!\")\n return redirect('url_listagem_materias')\n contexto = {'form' : form,\n 'texto_title': 'CadMateria',\n 'texto_titulo':'Cadastro Materias',\n 'texto_botao':'Cadastrar',\n 'url_voltar':'url_principal'\n }\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST['materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, \"Matéria excluida com sucesso!\")\n return redirect('url_listagem_materias')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url': 'url_listagem_materias'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 
'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_materia(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Materia.objects.get(id=id)\n            form = FormMateria(request.POST or None, request.FILES or None, instance=obj)\n            if form.is_valid():\n                form.save()\n                messages.success(request, \"Matéria atualizada com sucesso!\")\n                return redirect('url_listagem_materias')\n            else:\n                contexto = {\n                    'form': form,\n                    'texto_title': 'AtuMateria',\n                    'texto_titulo':'Atualização Matéria',\n                    'texto_botao':'Atualizar',\n                    'url_voltar':'url_listagem_materias'\n                }\n                return render(request, 'core/cadastro.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_resultado(request):\n    if request.user.is_staff:\n        form = FormResultado(request.POST or None)\n        if form.is_valid():\n            form.save()\n            messages.success(request, \"Resultado cadastrado com sucesso!\")\n            return redirect('url_listagem_resultados')\n        contexto = {'form' : form,\n                    'texto_title': 'CadRes',\n                    'texto_titulo':'Cadastro Resultados',\n                    'texto_botao':'Cadastrar',\n                    'url_voltar':'url_principal'\n                    }\n        return render(request, 'core/cadastro.html', contexto)\n    else:\n        return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_resultados(request):\n    try:\n        if request.user.is_staff:\n            if request.POST and request.POST['resultado_input']:\n                dados = Resultado.objects.filter(idAluno__nome=request.POST['resultado_input'])\n            else:\n                dados = Resultado.objects.all()\n            contexto = {'dados': dados}\n            return render(request, 'core/listagem_resultados.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_resultado(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Resultado.objects.get(id=id)\n            if request.POST:\n                obj.delete()\n                messages.success(request, \"Resultado excluido com sucesso!\")\n                return redirect('url_listagem_resultados')\n            else:\n                contexto = {'dados': obj.idAluno, 'id': obj.id, 'url': 'url_listagem_resultados'}\n                return render(request, 'core/confirma_exclusao.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_resultado(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Resultado.objects.get(id=id)\n            form = FormResultado(request.POST or None, request.FILES or None, instance=obj)\n            if form.is_valid():\n                form.save()\n                messages.success(request, \"Resultado atualizado com sucesso!\")\n                return redirect('url_listagem_resultados')\n            else:\n                contexto = {\n                    'form': form,\n                    'texto_title': 'AtuRes',\n                    'texto_titulo':'Atualização Resultados',\n                    'texto_botao':'Atualizar',\n                    'url_voltar':'url_listagem_resultados'\n                }\n                return render(request, 'core/cadastro.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg': 'Erro ao executar esta operação!', 'obj': erro})\n\n",
"from django.shortcuts import render, redirect\nfrom core.forms import FormAluno, FormTurma, FormMateria, FormResultado\nfrom core.models import Aluno, Turma, Materia, Resultado\nfrom django.urls import reverse_lazy\nfrom django.views import generic\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_alunos(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['aluno_input']:\n dados = Aluno.objects.filter(nome=request.POST['aluno_input'])\n else:\n dados = Aluno.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_alunos.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_turma(request):\n if request.user.is_staff:\n form = FormTurma(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Turma cadastrada com sucesso!')\n return redirect('url_listagem_turmas')\n contexto = {'form': form, 'texto_title': 'CadTurm', 'texto_titulo':\n 'Cadastro Turmas', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 
'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_turma(request, id):\n try:\n if request.user.is_staff:\n obj = Turma.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Turma excluida com sucesso!')\n return redirect('url_listagem_turmas')\n else:\n contexto = {'dados': obj.serie, 'id': obj.id, 'url':\n 'url_listagem_turmas'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_turma(request, id):\n try:\n if request.user.is_staff:\n obj = Turma.objects.get(id=id)\n form = FormTurma(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Turma atualizada com sucesso!')\n return redirect('url_listagem_turmas')\n else:\n contexto = {'form': form, 'texto_title': 'AtuTurm',\n 'texto_titulo': 'Atualização Turma', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_turmas'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_materia(request):\n if request.user.is_staff:\n form = FormMateria(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria cadastrada com sucesso!')\n return redirect('url_listagem_materias')\n contexto = {'form': form, 'texto_title': 'CadMateria',\n 'texto_titulo': 'Cadastro Materias', 'texto_botao': 'Cadastrar',\n 'url_voltar': 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST[\n 'materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Matéria excluida com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_materias'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta 
operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_materia(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Materia.objects.get(id=id)\n            form = FormMateria(request.POST or None, request.FILES or None,\n                instance=obj)\n            if form.is_valid():\n                form.save()\n                messages.success(request, 'Matéria atualizada com sucesso!')\n                return redirect('url_listagem_materias')\n            else:\n                contexto = {'form': form, 'texto_title': 'AtuMateria',\n                    'texto_titulo': 'Atualização Matéria', 'texto_botao':\n                    'Atualizar', 'url_voltar': 'url_listagem_materias'}\n                return render(request, 'core/cadastro.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_resultado(request):\n    if request.user.is_staff:\n        form = FormResultado(request.POST or None)\n        if form.is_valid():\n            form.save()\n            messages.success(request, 'Resultado cadastrado com sucesso!')\n            return redirect('url_listagem_resultados')\n        contexto = {'form': form, 'texto_title': 'CadRes', 'texto_titulo':\n            'Cadastro Resultados', 'texto_botao': 'Cadastrar', 'url_voltar':\n            'url_principal'}\n        return render(request, 'core/cadastro.html', contexto)\n    else:\n        return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_resultados(request):\n    try:\n        if request.user.is_staff:\n            if request.POST and request.POST['resultado_input']:\n                dados = Resultado.objects.filter(idAluno__nome=request.POST\n                    ['resultado_input'])\n            else:\n                dados = Resultado.objects.all()\n            contexto = {'dados': dados}\n            return render(request, 'core/listagem_resultados.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_resultado(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Resultado.objects.get(id=id)\n            if request.POST:\n                obj.delete()\n                messages.success(request, 'Resultado excluido com sucesso!')\n                return redirect('url_listagem_resultados')\n            else:\n                contexto = {'dados': obj.idAluno, 'id': obj.id, 'url':\n                    'url_listagem_resultados'}\n                return render(request, 'core/confirma_exclusao.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_resultado(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Resultado.objects.get(id=id)\n            form = FormResultado(request.POST or None, request.FILES or\n                None, instance=obj)\n            if form.is_valid():\n                form.save()\n                messages.success(request, 'Resultado atualizado com sucesso!')\n                return redirect('url_listagem_resultados')\n            else:\n                contexto = {'form': form, 'texto_title': 'AtuRes',\n                    'texto_titulo': 'Atualização Resultados', 'texto_botao':\n                    'Atualizar', 'url_voltar': 'url_listagem_resultados'}\n                return render(request, 'core/cadastro.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_alunos(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['aluno_input']:\n dados = Aluno.objects.filter(nome=request.POST['aluno_input'])\n else:\n dados = Aluno.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_alunos.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_turma(request):\n if request.user.is_staff:\n form = FormTurma(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Turma cadastrada com sucesso!')\n return redirect('url_listagem_turmas')\n contexto = {'form': form, 'texto_title': 'CadTurm', 'texto_titulo':\n 'Cadastro Turmas', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 
'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_turma(request, id):\n try:\n if request.user.is_staff:\n obj = Turma.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Turma excluida com sucesso!')\n return redirect('url_listagem_turmas')\n else:\n contexto = {'dados': obj.serie, 'id': obj.id, 'url':\n 'url_listagem_turmas'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_turma(request, id):\n try:\n if request.user.is_staff:\n obj = Turma.objects.get(id=id)\n form = FormTurma(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Turma atualizada com sucesso!')\n return redirect('url_listagem_turmas')\n else:\n contexto = {'form': form, 'texto_title': 'AtuTurm',\n 'texto_titulo': 'Atualização Turma', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_turmas'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_materia(request):\n if request.user.is_staff:\n form = FormMateria(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria cadastrada com sucesso!')\n return redirect('url_listagem_materias')\n contexto = {'form': form, 'texto_title': 'CadMateria',\n 'texto_titulo': 'Cadastro Materias', 'texto_botao': 'Cadastrar',\n 'url_voltar': 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST[\n 'materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Matéria excluida com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_materias'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = 
{'form': form, 'texto_title': 'AtuMateria',\n                    'texto_titulo': 'Atualização Matéria', 'texto_botao':\n                    'Atualizar', 'url_voltar': 'url_listagem_materias'}\n                return render(request, 'core/cadastro.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_resultado(request):\n    if request.user.is_staff:\n        form = FormResultado(request.POST or None)\n        if form.is_valid():\n            form.save()\n            messages.success(request, 'Resultado cadastrado com sucesso!')\n            return redirect('url_listagem_resultados')\n        contexto = {'form': form, 'texto_title': 'CadRes', 'texto_titulo':\n            'Cadastro Resultados', 'texto_botao': 'Cadastrar', 'url_voltar':\n            'url_principal'}\n        return render(request, 'core/cadastro.html', contexto)\n    else:\n        return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_resultados(request):\n    try:\n        if request.user.is_staff:\n            if request.POST and request.POST['resultado_input']:\n                dados = Resultado.objects.filter(idAluno__nome=request.POST\n                    ['resultado_input'])\n            else:\n                dados = Resultado.objects.all()\n            contexto = {'dados': dados}\n            return render(request, 'core/listagem_resultados.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_resultado(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Resultado.objects.get(id=id)\n            if request.POST:\n                obj.delete()\n                messages.success(request, 'Resultado excluido com sucesso!')\n                return redirect('url_listagem_resultados')\n            else:\n                contexto = {'dados': obj.idAluno, 'id': obj.id, 'url':\n                    'url_listagem_resultados'}\n                return render(request, 'core/confirma_exclusao.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_resultado(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Resultado.objects.get(id=id)\n            form = FormResultado(request.POST or None, request.FILES or\n                None, instance=obj)\n            if form.is_valid():\n                form.save()\n                messages.success(request, 'Resultado atualizado com sucesso!')\n                return redirect('url_listagem_resultados')\n            else:\n                contexto = {'form': form, 'texto_title': 'AtuRes',\n                    'texto_titulo': 'Atualização Resultados', 'texto_botao':\n                    'Atualizar', 'url_voltar': 'url_listagem_resultados'}\n                return render(request, 'core/cadastro.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_alunos(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['aluno_input']:\n dados = Aluno.objects.filter(nome=request.POST['aluno_input'])\n else:\n dados = Aluno.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_alunos.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_turma(request):\n if request.user.is_staff:\n form = FormTurma(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Turma cadastrada com sucesso!')\n return redirect('url_listagem_turmas')\n contexto = {'form': form, 'texto_title': 'CadTurm', 'texto_titulo':\n 'Cadastro Turmas', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 
'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_turma(request, id):\n try:\n if request.user.is_staff:\n obj = Turma.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Turma excluida com sucesso!')\n return redirect('url_listagem_turmas')\n else:\n contexto = {'dados': obj.serie, 'id': obj.id, 'url':\n 'url_listagem_turmas'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef cadastro_materia(request):\n if request.user.is_staff:\n form = FormMateria(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria cadastrada com sucesso!')\n return redirect('url_listagem_materias')\n contexto = {'form': form, 'texto_title': 'CadMateria',\n 'texto_titulo': 'Cadastro Materias', 'texto_botao': 'Cadastrar',\n 'url_voltar': 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST[\n 'materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Matéria excluida com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_materias'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_resultado(request):\n if request.user.is_staff:\n form = FormResultado(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Resultado cadastrado com sucesso!')\n return redirect('url_listagem_resultados')\n contexto = {'form': form, 'texto_title': 'CadRes', 
'texto_titulo':\n            'Cadastro Resultados', 'texto_botao': 'Cadastrar', 'url_voltar':\n            'url_principal'}\n        return render(request, 'core/cadastro.html', contexto)\n    else:\n        return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_resultados(request):\n    try:\n        if request.user.is_staff:\n            if request.POST and request.POST['resultado_input']:\n                dados = Resultado.objects.filter(idAluno__nome=request.POST\n                    ['resultado_input'])\n            else:\n                dados = Resultado.objects.all()\n            contexto = {'dados': dados}\n            return render(request, 'core/listagem_resultados.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_resultado(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Resultado.objects.get(id=id)\n            if request.POST:\n                obj.delete()\n                messages.success(request, 'Resultado excluido com sucesso!')\n                return redirect('url_listagem_resultados')\n            else:\n                contexto = {'dados': obj.idAluno, 'id': obj.id, 'url':\n                    'url_listagem_resultados'}\n                return render(request, 'core/confirma_exclusao.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_resultado(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Resultado.objects.get(id=id)\n            form = FormResultado(request.POST or None, request.FILES or\n                None, instance=obj)\n            if form.is_valid():\n                form.save()\n                messages.success(request, 'Resultado atualizado com sucesso!')\n                return redirect('url_listagem_resultados')\n            else:\n                contexto = {'form': form, 'texto_title': 'AtuRes',\n                    'texto_titulo': 'Atualização Resultados', 'texto_botao':\n                    'Atualizar', 'url_voltar': 'url_listagem_resultados'}\n                return render(request, 'core/cadastro.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_alunos(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['aluno_input']:\n dados = Aluno.objects.filter(nome=request.POST['aluno_input'])\n else:\n dados = Aluno.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_alunos.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_turma(request):\n if request.user.is_staff:\n form = FormTurma(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Turma cadastrada com sucesso!')\n return redirect('url_listagem_turmas')\n contexto = {'form': form, 'texto_title': 'CadTurm', 'texto_titulo':\n 'Cadastro Turmas', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 
'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_turma(request, id):\n try:\n if request.user.is_staff:\n obj = Turma.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Turma excluida com sucesso!')\n return redirect('url_listagem_turmas')\n else:\n contexto = {'dados': obj.serie, 'id': obj.id, 'url':\n 'url_listagem_turmas'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef cadastro_materia(request):\n if request.user.is_staff:\n form = FormMateria(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria cadastrada com sucesso!')\n return redirect('url_listagem_materias')\n contexto = {'form': form, 'texto_title': 'CadMateria',\n 'texto_titulo': 'Cadastro Materias', 'texto_botao': 'Cadastrar',\n 'url_voltar': 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST[\n 'materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Matéria excluida com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_materias'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return 
render(request, 'core/listagem_resultados.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_resultado(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Resultado.objects.get(id=id)\n            if request.POST:\n                obj.delete()\n                messages.success(request, 'Resultado excluido com sucesso!')\n                return redirect('url_listagem_resultados')\n            else:\n                contexto = {'dados': obj.idAluno, 'id': obj.id, 'url':\n                    'url_listagem_resultados'}\n                return render(request, 'core/confirma_exclusao.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_resultado(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Resultado.objects.get(id=id)\n            form = FormResultado(request.POST or None, request.FILES or\n                None, instance=obj)\n            if form.is_valid():\n                form.save()\n                messages.success(request, 'Resultado atualizado com sucesso!')\n                return redirect('url_listagem_resultados')\n            else:\n                contexto = {'form': form, 'texto_title': 'AtuRes',\n                    'texto_titulo': 'Atualização Resultados', 'texto_botao':\n                    'Atualizar', 'url_voltar': 'url_listagem_resultados'}\n                return render(request, 'core/cadastro.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_turma(request):\n if request.user.is_staff:\n form = FormTurma(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Turma cadastrada com sucesso!')\n return redirect('url_listagem_turmas')\n contexto = {'form': form, 'texto_title': 'CadTurm', 'texto_titulo':\n 'Cadastro Turmas', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_turma(request, id):\n try:\n if request.user.is_staff:\n obj = Turma.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Turma excluida com sucesso!')\n return redirect('url_listagem_turmas')\n else:\n contexto = {'dados': obj.serie, 'id': obj.id, 'url':\n 'url_listagem_turmas'}\n 
return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef cadastro_materia(request):\n if request.user.is_staff:\n form = FormMateria(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria cadastrada com sucesso!')\n return redirect('url_listagem_materias')\n contexto = {'form': form, 'texto_title': 'CadMateria',\n 'texto_titulo': 'Cadastro Materias', 'texto_botao': 'Cadastrar',\n 'url_voltar': 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST[\n 'materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Matéria excluida com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_materias'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_resultado(request, id):\n try:\n if request.user.is_staff:\n obj = Resultado.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Resultado excluido com sucesso!')\n return 
redirect('url_listagem_resultados')\n            else:\n                contexto = {'dados': obj.idAluno, 'id': obj.id, 'url':\n                    'url_listagem_resultados'}\n                return render(request, 'core/confirma_exclusao.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_resultado(request, id):\n    try:\n        if request.user.is_staff:\n            obj = Resultado.objects.get(id=id)\n            form = FormResultado(request.POST or None, request.FILES or\n                None, instance=obj)\n            if form.is_valid():\n                form.save()\n                messages.success(request, 'Resultado atualizado com sucesso!')\n                return redirect('url_listagem_resultados')\n            else:\n                contexto = {'form': form, 'texto_title': 'AtuRes',\n                    'texto_titulo': 'Atualização Resultados', 'texto_botao':\n                    'Atualizar', 'url_voltar': 'url_listagem_resultados'}\n                return render(request, 'core/cadastro.html', contexto)\n        else:\n            return render(request, 'core/NaoAutorizado.html')\n    except Exception as erro:\n        return render(request, 'error.html', {'msg':\n            'Erro ao executar esta operação!', 'obj': erro})\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_turma(request):\n if request.user.is_staff:\n form = FormTurma(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Turma cadastrada com sucesso!')\n return redirect('url_listagem_turmas')\n contexto = {'form': form, 'texto_title': 'CadTurm', 'texto_titulo':\n 'Cadastro Turmas', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_turma(request, id):\n try:\n if request.user.is_staff:\n obj = Turma.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Turma excluida com sucesso!')\n return redirect('url_listagem_turmas')\n else:\n contexto = {'dados': obj.serie, 'id': obj.id, 'url':\n 'url_listagem_turmas'}\n 
return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef cadastro_materia(request):\n if request.user.is_staff:\n form = FormMateria(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria cadastrada com sucesso!')\n return redirect('url_listagem_materias')\n contexto = {'form': form, 'texto_title': 'CadMateria',\n 'texto_titulo': 'Cadastro Materias', 'texto_botao': 'Cadastrar',\n 'url_voltar': 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST[\n 'materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Matéria excluida com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_materias'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef atualiza_resultado(request, id):\n try:\n if request.user.is_staff:\n obj = Resultado.objects.get(id=id)\n form = FormResultado(request.POST or None, request.FILES or\n None, instance=obj)\n if 
form.is_valid():\n form.save()\n messages.success(request, 'Resultado atualizado com sucesso!')\n return redirect('url_listagem_resultados')\n else:\n contexto = {'form': form, 'texto_title': 'AtuRes',\n 'texto_titulo': 'Atualização Resultados', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_resultados'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_turma(request):\n if request.user.is_staff:\n form = FormTurma(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Turma cadastrada com sucesso!')\n return redirect('url_listagem_turmas')\n contexto = {'form': form, 'texto_title': 'CadTurm', 'texto_titulo':\n 'Cadastro Turmas', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n\n\n@login_required\ndef cadastro_materia(request):\n if request.user.is_staff:\n form = FormMateria(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria cadastrada com sucesso!')\n return redirect('url_listagem_materias')\n contexto = {'form': form, 
'texto_title': 'CadMateria',\n 'texto_titulo': 'Cadastro Materias', 'texto_botao': 'Cadastrar',\n 'url_voltar': 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST[\n 'materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Matéria excluida com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_materias'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef atualiza_resultado(request, id):\n try:\n if request.user.is_staff:\n obj = Resultado.objects.get(id=id)\n form = FormResultado(request.POST or None, request.FILES or\n None, instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Resultado atualizado com sucesso!')\n return redirect('url_listagem_resultados')\n else:\n contexto = {'form': form, 'texto_title': 'AtuRes',\n 'texto_titulo': 'Atualização Resultados', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_resultados'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_turma(request):\n if request.user.is_staff:\n form = FormTurma(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Turma cadastrada com sucesso!')\n return redirect('url_listagem_turmas')\n contexto = {'form': form, 'texto_title': 'CadTurm', 'texto_titulo':\n 'Cadastro Turmas', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n\n\n@login_required\ndef cadastro_materia(request):\n if request.user.is_staff:\n form = FormMateria(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria cadastrada com sucesso!')\n return redirect('url_listagem_materias')\n contexto = {'form': form, 
'texto_title': 'CadMateria',\n 'texto_titulo': 'Cadastro Materias', 'texto_botao': 'Cadastrar',\n 'url_voltar': 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST[\n 'materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef exclui_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Matéria excluida com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_materias'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef cadastro_turma(request):\n if request.user.is_staff:\n form = FormTurma(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Turma cadastrada com sucesso!')\n return redirect('url_listagem_turmas')\n contexto = {'form': form, 'texto_title': 'CadTurm', 'texto_titulo':\n 'Cadastro Turmas', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n\n\n@login_required\ndef cadastro_materia(request):\n if request.user.is_staff:\n form = FormMateria(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria cadastrada com sucesso!')\n return redirect('url_listagem_materias')\n contexto = {'form': form, 
'texto_title': 'CadMateria',\n 'texto_titulo': 'Cadastro Materias', 'texto_botao': 'Cadastrar',\n 'url_voltar': 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST[\n 'materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n\n\n@login_required\ndef cadastro_materia(request):\n if request.user.is_staff:\n form = FormMateria(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria cadastrada com sucesso!')\n return redirect('url_listagem_materias')\n contexto = {'form': form, 'texto_title': 'CadMateria',\n 'texto_titulo': 'Cadastro Materias', 'texto_botao': 'Cadastrar',\n 'url_voltar': 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST[\n 'materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = 
{'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n<function token>\n\n\n@login_required\ndef listagem_materias(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['materia_input']:\n dados = Materia.objects.filter(nome=request.POST[\n 'materia_input'])\n else:\n dados = Materia.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_materias.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n 
form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_turmas(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['turma_input']:\n dados = Turma.objects.filter(serie=request.POST['turma_input'])\n else:\n dados = Turma.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_turmas.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 
'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n\n\n@login_required\ndef exclui_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n if request.POST:\n obj.delete()\n messages.success(request, 'Aluno excluido com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'dados': obj.nome, 'id': obj.id, 'url':\n 'url_listagem_alunos'}\n return render(request, 'core/confirma_exclusao.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return 
render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n<function token>\n\n\n@login_required\ndef atualiza_aluno(request, id):\n try:\n if request.user.is_staff:\n obj = Aluno.objects.get(id=id)\n form = FormAluno(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno atualizado com sucesso!')\n return redirect('url_listagem_alunos')\n else:\n contexto = {'form': form, 'texto_title': 'AtuAlu',\n 'texto_titulo': 'Atualização Aluno', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_alunos'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@login_required\ndef atualiza_materia(request, id):\n try:\n if request.user.is_staff:\n obj = Materia.objects.get(id=id)\n form = FormMateria(request.POST or None, request.FILES or None,\n instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Matéria atualizada com sucesso!')\n return redirect('url_listagem_materias')\n else:\n contexto = {'form': form, 'texto_title': 'AtuMateria',\n 'texto_titulo': 'Atualização Matéria', 'texto_botao':\n 'Atualizar', 'url_voltar': 'url_listagem_materias'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n@login_required\ndef cadastro_aluno(request):\n if request.user.is_staff:\n form = FormAluno(None or request.POST, request.FILES or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Aluno cadastrado com sucesso!')\n return redirect('url_listagem_alunos')\n contexto = {'form': form, 'texto_title': 'CadAlu', 'texto_titulo':\n 'Cadastro Aluno', 'texto_botao': 'Cadastrar', 'url_voltar':\n 'url_principal'}\n return render(request, 'core/cadastro.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@login_required\ndef listagem_resultados(request):\n try:\n if request.user.is_staff:\n if request.POST and request.POST['resultado_input']:\n dados = Resultado.objects.filter(idAluno__nome=request.POST\n ['resultado_input'])\n else:\n dados = Resultado.objects.all()\n contexto = {'dados': dados}\n return render(request, 'core/listagem_resultados.html', contexto)\n else:\n return render(request, 'core/NaoAutorizado.html')\n except Exception as erro:\n return render(request, 'error.html', {'msg':\n 'Erro ao executar esta operação!', 'obj': erro})\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Registrar(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/register.html'\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\nclass Registrar(generic.CreateView):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,437 |
6baa0921b3cea6c1a7cfe1b07a576c17b7c7ebf9
|
from __future__ import unicode_literals

from django.conf import settings
from django.http import Http404, JsonResponse
from django.template.loader import render_to_string


class AjaxResponseAction:
    """ Represents the list of actions available after an ajax response """

    NOTHING = "nothing"
    REDIRECT = "redirect"
    REFRESH = "refresh"

    choices = (
        NOTHING,
        REDIRECT,
        REFRESH
    )


class AjaxResponseStatus:
    """ Represents the list of statuses available for an ajax response """

    ERROR = "error"
    SUCCESS = "success"

    choices = (
        ERROR,
        SUCCESS,
    )


class AjaxResponseMixin(object):
    """ Mixin responsible for building the JSON response """

    action = AjaxResponseAction.NOTHING
    json_status = AjaxResponseStatus.SUCCESS

    def json_to_response(self, action=None, json_status=None, success_url=None,
                         json_data=None, **response_kwargs):
        """ Builds a valid response with the next action to be followed by the JS """
        data = {
            "status": self.get_status(json_status),
            "action": self.get_action(action),
            "extra_data": self.get_json_data(json_data or {})
        }

        if self.action == AjaxResponseAction.REDIRECT:
            data["action_url"] = success_url or self.get_success_url()
        return JsonResponse(data, **response_kwargs)

    def get_action(self, action=None):
        """ Returns the action to take after the call """
        if action:
            self.action = action

        if self.action not in AjaxResponseAction.choices:
            raise ValueError(
                "Invalid action selected: '{}'".format(self.action))

        return self.action

    def get_status(self, json_status=None):
        """ Returns the status for the JSON response """
        if json_status:
            self.json_status = json_status

        if self.json_status not in AjaxResponseStatus.choices:
            raise ValueError(
                "Invalid status selected: '{}'".format(self.json_status))

        return self.json_status

    def get_json_data(self, json_data=None):
        """ Returns any extra data to add to the json """
        return json_data or {}
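
# Example payload (illustrative): a view configured with
# action = AjaxResponseAction.REDIRECT makes json_to_response() answer with
#     {"status": "success", "action": "redirect",
#      "extra_data": {}, "action_url": "/some/success/url/"}
# and the client-side JS is expected to switch on "action" to decide what
# happens next ("/some/success/url/" is a placeholder, not a real route).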


class FormAjaxMixin(AjaxResponseMixin):
    """ Mixin responsible for handling ajax form submission """

    def form_invalid(self, form, prefix=None):
        """ If the form is invalid, returns the error list in a JSON response """
        response = super(FormAjaxMixin, self).form_invalid(form)
        if self.request.is_ajax():
            data = {
                "errors_list": self.add_prefix(form.errors, prefix),
            }
            return self.json_to_response(status=400, json_data=data,
                                         json_status=AjaxResponseStatus.ERROR)
        return response

    def get_success_url(self):
        """ Returns the success url for non-ajax requests; ajax requests get None """
        if not self.request.is_ajax():
            return super(FormAjaxMixin, self).get_success_url()
        return None

    def form_valid(self, form):
        """ If the form is valid, returns the response with the next action """
        response = super(FormAjaxMixin, self).form_valid(form)
        if self.request.is_ajax():
            return self.json_to_response()
        return response

    def add_prefix(self, errors, prefix):
        """ Adds the form prefix to the error keys """
        if not prefix:
            prefix = self.get_prefix()
        if prefix:
            return {"%s-%s" % (prefix, k): v for k, v in errors.items()}
        return errors
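
# Example payload (illustrative): an invalid ajax POST, with the default
# "nothing" action, gets an HTTP 400 answer shaped like
#     {"status": "error", "action": "nothing",
#      "extra_data": {"errors_list": {"email": ["This field is required."]}}}
# where "email" stands in for whatever fields the concrete form declares.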


class PartialAjaxMixin(object):
    """ Mixin responsible for returning the rendered template in a JSON response """

    partial_title = None

    def get_partial_title(self):
        return self.partial_title

    def get_context_data(self, **kwargs):
        context = super(PartialAjaxMixin, self).get_context_data(**kwargs)
        partial_title = self.get_partial_title()
        if partial_title:
            context.update({
                'title': partial_title
            })
        return context

    def render_to_response(self, context, **response_kwargs):
        """ Returns the rendered template in JSON format """
        if self.request.is_ajax():
            data = {
                "content": render_to_string(
                    self.get_template_names(), context, request=self.request)
            }
            return JsonResponse(data)
        if settings.DEBUG:
            return super(PartialAjaxMixin, self).render_to_response(
                context, **response_kwargs)
        raise Http404()
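
# --- Usage sketch (illustration only; every name below is an assumption,
# not part of the original module) ---
# Minimal wiring of FormAjaxMixin into a Django class-based view. Note that
# request.is_ajax(), which the mixins rely on, was deprecated in Django 3.1
# and removed in 4.0, so the module as written targets older Django versions.
from django import forms
from django.views.generic.edit import FormView


class ContactForm(forms.Form):
    # Hypothetical form, defined only to keep the sketch self-contained.
    email = forms.EmailField()


class ContactFormView(FormAjaxMixin, FormView):
    # On a plain POST this behaves like a normal FormView; on an ajax POST
    # it answers with the JSON contract sketched in the comments above.
    form_class = ContactForm
    template_name = "contact.html"  # assumed template path
    success_url = "/thanks/"        # becomes "action_url" in the payload
    action = AjaxResponseAction.REDIRECT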
|
[
"from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.http import Http404, JsonResponse\nfrom django.template.loader import render_to_string\n\n\nclass AjaxResponseAction():\n \"\"\" Represents list of actions available after ajax response \"\"\"\n\n NOTHING = \"nothing\"\n REDIRECT = \"redirect\"\n REFRESH = \"refresh\"\n\n choices = (\n NOTHING,\n REDIRECT,\n REFRESH\n )\n\n\nclass AjaxResponseStatus():\n \"\"\" Represents list of status available at ajax response \"\"\"\n\n ERROR = \"error\"\n SUCCESS = \"success\"\n\n choices = (\n ERROR,\n SUCCESS,\n )\n\n\nclass AjaxResponseMixin(object):\n \"\"\" Mixin responsible to give the JSON response \"\"\"\n action = AjaxResponseAction.NOTHING\n json_status = AjaxResponseStatus.SUCCESS\n\n def json_to_response(self, action=None, json_status=None, success_url=None,\n json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {\n \"status\": self.get_status(json_status),\n \"action\": self.get_action(action),\n \"extra_data\": self.get_json_data(json_data or {})\n }\n\n if self.action == AjaxResponseAction.REDIRECT:\n data[\"action_url\"] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\n \"Invalid action selected: '{}'\".format(self.action))\n\n return self.action\n\n def get_status(self, json_status=None):\n \"\"\" Returns status of for json \"\"\"\n if json_status:\n self.json_status = json_status\n\n if self.json_status not in AjaxResponseStatus.choices:\n raise ValueError(\n \"Invalid status selected: '{}'\".format(self.json_status))\n\n return self.json_status\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {\n \"errors_list\": self.add_prefix(form.errors, prefix),\n }\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {\"%s-%s\" % (prefix, k): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({\n 'title': partial_title\n })\n return context\n\n def 
render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {\n \"content\": render_to_string(\n self.get_template_names(), context, request=self.request)\n }\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(\n context, **response_kwargs)\n raise Http404()\n",
"from __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.http import Http404, JsonResponse\nfrom django.template.loader import render_to_string\n\n\nclass AjaxResponseAction:\n \"\"\" Represents list of actions available after ajax response \"\"\"\n NOTHING = 'nothing'\n REDIRECT = 'redirect'\n REFRESH = 'refresh'\n choices = NOTHING, REDIRECT, REFRESH\n\n\nclass AjaxResponseStatus:\n \"\"\" Represents list of status available at ajax response \"\"\"\n ERROR = 'error'\n SUCCESS = 'success'\n choices = ERROR, SUCCESS\n\n\nclass AjaxResponseMixin(object):\n \"\"\" Mixin responsible to give the JSON response \"\"\"\n action = AjaxResponseAction.NOTHING\n json_status = AjaxResponseStatus.SUCCESS\n\n def json_to_response(self, action=None, json_status=None, success_url=\n None, json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {'status': self.get_status(json_status), 'action': self.\n get_action(action), 'extra_data': self.get_json_data(json_data or\n {})}\n if self.action == AjaxResponseAction.REDIRECT:\n data['action_url'] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n\n def get_status(self, json_status=None):\n \"\"\" Returns status of for json \"\"\"\n if json_status:\n self.json_status = json_status\n if self.json_status not in AjaxResponseStatus.choices:\n raise ValueError(\"Invalid status selected: '{}'\".format(self.\n json_status))\n return self.json_status\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in 
JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n\n\nclass AjaxResponseAction:\n \"\"\" Represents list of actions available after ajax response \"\"\"\n NOTHING = 'nothing'\n REDIRECT = 'redirect'\n REFRESH = 'refresh'\n choices = NOTHING, REDIRECT, REFRESH\n\n\nclass AjaxResponseStatus:\n \"\"\" Represents list of status available at ajax response \"\"\"\n ERROR = 'error'\n SUCCESS = 'success'\n choices = ERROR, SUCCESS\n\n\nclass AjaxResponseMixin(object):\n \"\"\" Mixin responsible to give the JSON response \"\"\"\n action = AjaxResponseAction.NOTHING\n json_status = AjaxResponseStatus.SUCCESS\n\n def json_to_response(self, action=None, json_status=None, success_url=\n None, json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {'status': self.get_status(json_status), 'action': self.\n get_action(action), 'extra_data': self.get_json_data(json_data or\n {})}\n if self.action == AjaxResponseAction.REDIRECT:\n data['action_url'] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n\n def get_status(self, json_status=None):\n \"\"\" Returns status of for json \"\"\"\n if json_status:\n self.json_status = json_status\n if self.json_status not in AjaxResponseStatus.choices:\n raise ValueError(\"Invalid status selected: '{}'\".format(self.\n json_status))\n return self.json_status\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return 
JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n\n\nclass AjaxResponseAction:\n <docstring token>\n NOTHING = 'nothing'\n REDIRECT = 'redirect'\n REFRESH = 'refresh'\n choices = NOTHING, REDIRECT, REFRESH\n\n\nclass AjaxResponseStatus:\n \"\"\" Represents list of status available at ajax response \"\"\"\n ERROR = 'error'\n SUCCESS = 'success'\n choices = ERROR, SUCCESS\n\n\nclass AjaxResponseMixin(object):\n \"\"\" Mixin responsible to give the JSON response \"\"\"\n action = AjaxResponseAction.NOTHING\n json_status = AjaxResponseStatus.SUCCESS\n\n def json_to_response(self, action=None, json_status=None, success_url=\n None, json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {'status': self.get_status(json_status), 'action': self.\n get_action(action), 'extra_data': self.get_json_data(json_data or\n {})}\n if self.action == AjaxResponseAction.REDIRECT:\n data['action_url'] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n\n def get_status(self, json_status=None):\n \"\"\" Returns status of for json \"\"\"\n if json_status:\n self.json_status = json_status\n if self.json_status not in AjaxResponseStatus.choices:\n raise ValueError(\"Invalid status selected: '{}'\".format(self.\n json_status))\n return self.json_status\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return 
super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n\n\nclass AjaxResponseAction:\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass AjaxResponseStatus:\n \"\"\" Represents list of status available at ajax response \"\"\"\n ERROR = 'error'\n SUCCESS = 'success'\n choices = ERROR, SUCCESS\n\n\nclass AjaxResponseMixin(object):\n \"\"\" Mixin responsible to give the JSON response \"\"\"\n action = AjaxResponseAction.NOTHING\n json_status = AjaxResponseStatus.SUCCESS\n\n def json_to_response(self, action=None, json_status=None, success_url=\n None, json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {'status': self.get_status(json_status), 'action': self.\n get_action(action), 'extra_data': self.get_json_data(json_data or\n {})}\n if self.action == AjaxResponseAction.REDIRECT:\n data['action_url'] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n\n def get_status(self, json_status=None):\n \"\"\" Returns status of for json \"\"\"\n if json_status:\n self.json_status = json_status\n if self.json_status not in AjaxResponseStatus.choices:\n raise ValueError(\"Invalid status selected: '{}'\".format(self.\n json_status))\n return self.json_status\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, 
self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n\n\nclass AjaxResponseStatus:\n \"\"\" Represents list of status available at ajax response \"\"\"\n ERROR = 'error'\n SUCCESS = 'success'\n choices = ERROR, SUCCESS\n\n\nclass AjaxResponseMixin(object):\n \"\"\" Mixin responsible to give the JSON response \"\"\"\n action = AjaxResponseAction.NOTHING\n json_status = AjaxResponseStatus.SUCCESS\n\n def json_to_response(self, action=None, json_status=None, success_url=\n None, json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {'status': self.get_status(json_status), 'action': self.\n get_action(action), 'extra_data': self.get_json_data(json_data or\n {})}\n if self.action == AjaxResponseAction.REDIRECT:\n data['action_url'] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n\n def get_status(self, json_status=None):\n \"\"\" Returns status of for json \"\"\"\n if json_status:\n self.json_status = json_status\n if self.json_status not in AjaxResponseStatus.choices:\n raise ValueError(\"Invalid status selected: '{}'\".format(self.\n json_status))\n return self.json_status\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n\n\nclass AjaxResponseStatus:\n <docstring token>\n ERROR = 'error'\n SUCCESS = 'success'\n choices = ERROR, SUCCESS\n\n\nclass AjaxResponseMixin(object):\n \"\"\" Mixin responsible to give the JSON response \"\"\"\n action = AjaxResponseAction.NOTHING\n json_status = AjaxResponseStatus.SUCCESS\n\n def json_to_response(self, action=None, json_status=None, success_url=\n None, json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {'status': self.get_status(json_status), 'action': self.\n get_action(action), 'extra_data': self.get_json_data(json_data or\n {})}\n if self.action == AjaxResponseAction.REDIRECT:\n data['action_url'] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n\n def get_status(self, json_status=None):\n \"\"\" Returns status of for json \"\"\"\n if json_status:\n self.json_status = json_status\n if self.json_status not in AjaxResponseStatus.choices:\n raise ValueError(\"Invalid status selected: '{}'\".format(self.\n json_status))\n return self.json_status\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n\n\nclass AjaxResponseStatus:\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass AjaxResponseMixin(object):\n \"\"\" Mixin responsible to give the JSON response \"\"\"\n action = AjaxResponseAction.NOTHING\n json_status = AjaxResponseStatus.SUCCESS\n\n def json_to_response(self, action=None, json_status=None, success_url=\n None, json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {'status': self.get_status(json_status), 'action': self.\n get_action(action), 'extra_data': self.get_json_data(json_data or\n {})}\n if self.action == AjaxResponseAction.REDIRECT:\n data['action_url'] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n\n def get_status(self, json_status=None):\n \"\"\" Returns status of for json \"\"\"\n if json_status:\n self.json_status = json_status\n if self.json_status not in AjaxResponseStatus.choices:\n raise ValueError(\"Invalid status selected: '{}'\".format(self.\n json_status))\n return self.json_status\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n\n\nclass AjaxResponseMixin(object):\n \"\"\" Mixin responsible to give the JSON response \"\"\"\n action = AjaxResponseAction.NOTHING\n json_status = AjaxResponseStatus.SUCCESS\n\n def json_to_response(self, action=None, json_status=None, success_url=\n None, json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {'status': self.get_status(json_status), 'action': self.\n get_action(action), 'extra_data': self.get_json_data(json_data or\n {})}\n if self.action == AjaxResponseAction.REDIRECT:\n data['action_url'] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n\n def get_status(self, json_status=None):\n \"\"\" Returns status of for json \"\"\"\n if json_status:\n self.json_status = json_status\n if self.json_status not in AjaxResponseStatus.choices:\n raise ValueError(\"Invalid status selected: '{}'\".format(self.\n json_status))\n return self.json_status\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n\n\nclass AjaxResponseMixin(object):\n <docstring token>\n action = AjaxResponseAction.NOTHING\n json_status = AjaxResponseStatus.SUCCESS\n\n def json_to_response(self, action=None, json_status=None, success_url=\n None, json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {'status': self.get_status(json_status), 'action': self.\n get_action(action), 'extra_data': self.get_json_data(json_data or\n {})}\n if self.action == AjaxResponseAction.REDIRECT:\n data['action_url'] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n\n def get_status(self, json_status=None):\n \"\"\" Returns status of for json \"\"\"\n if json_status:\n self.json_status = json_status\n if self.json_status not in AjaxResponseStatus.choices:\n raise ValueError(\"Invalid status selected: '{}'\".format(self.\n json_status))\n return self.json_status\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n\n\nclass AjaxResponseMixin(object):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def json_to_response(self, action=None, json_status=None, success_url=\n None, json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {'status': self.get_status(json_status), 'action': self.\n get_action(action), 'extra_data': self.get_json_data(json_data or\n {})}\n if self.action == AjaxResponseAction.REDIRECT:\n data['action_url'] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n\n def get_status(self, json_status=None):\n \"\"\" Returns status of for json \"\"\"\n if json_status:\n self.json_status = json_status\n if self.json_status not in AjaxResponseStatus.choices:\n raise ValueError(\"Invalid status selected: '{}'\".format(self.\n json_status))\n return self.json_status\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n\n\nclass AjaxResponseMixin(object):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def json_to_response(self, action=None, json_status=None, success_url=\n None, json_data=None, **response_kwargs):\n \"\"\" Valid response with next action to be followed by the JS \"\"\"\n data = {'status': self.get_status(json_status), 'action': self.\n get_action(action), 'extra_data': self.get_json_data(json_data or\n {})}\n if self.action == AjaxResponseAction.REDIRECT:\n data['action_url'] = success_url or self.get_success_url()\n return JsonResponse(data, **response_kwargs)\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n <function token>\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n\n\nclass AjaxResponseMixin(object):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def get_action(self, action=None):\n \"\"\" Returns action to take after call \"\"\"\n if action:\n self.action = action\n if self.action not in AjaxResponseAction.choices:\n raise ValueError(\"Invalid action selected: '{}'\".format(self.\n action))\n return self.action\n <function token>\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n\n\nclass AjaxResponseMixin(object):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def get_json_data(self, json_data=None):\n \"\"\" Returns any extra data to add to json \"\"\"\n return json_data or {}\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n\n\nclass AjaxResponseMixin(object):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n \"\"\" Mixin responsible to take care of form ajax submission \"\"\"\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n <docstring token>\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n\n def add_prefix(self, errors, prefix):\n \"\"\"Add form prefix to errors\"\"\"\n if not prefix:\n prefix = self.get_prefix()\n if prefix:\n return {('%s-%s' % (prefix, k)): v for k, v in errors.items()}\n return errors\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n <docstring token>\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n\n def form_valid(self, form):\n \"\"\" If form valid return response with action \"\"\"\n response = super(FormAjaxMixin, self).form_valid(form)\n if self.request.is_ajax():\n return self.json_to_response()\n return response\n <function token>\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n <docstring token>\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n\n def get_success_url(self):\n \"\"\" \"\"\"\n if not self.request.is_ajax():\n return super(FormAjaxMixin, self).get_success_url()\n return None\n <function token>\n <function token>\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n <docstring token>\n\n def form_invalid(self, form, prefix=None):\n \"\"\" If form invalid return error list in JSON response \"\"\"\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {'errors_list': self.add_prefix(form.errors, prefix)}\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response\n <function token>\n <function token>\n <function token>\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass FormAjaxMixin(AjaxResponseMixin):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PartialAjaxMixin(object):\n \"\"\" Mixin responsible to return the JSON with template rendered \"\"\"\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PartialAjaxMixin(object):\n <docstring token>\n partial_title = None\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PartialAjaxMixin(object):\n <docstring token>\n <assignment token>\n\n def get_partial_title(self):\n return self.partial_title\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PartialAjaxMixin(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\" Returns the rendered template in JSON format \"\"\"\n if self.request.is_ajax():\n data = {'content': render_to_string(self.get_template_names(),\n context, request=self.request)}\n return JsonResponse(data)\n if settings.DEBUG:\n return super(PartialAjaxMixin, self).render_to_response(context,\n **response_kwargs)\n raise Http404()\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PartialAjaxMixin(object):\n <docstring token>\n <assignment token>\n <function token>\n\n def get_context_data(self, **kwargs):\n context = super(PartialAjaxMixin, self).get_context_data(**kwargs)\n partial_title = self.get_partial_title()\n if partial_title:\n context.update({'title': partial_title})\n return context\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PartialAjaxMixin(object):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,438 |
f181c41edfd842698b13db045e4fc138b61461dd
|
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='localhost', port=9200)
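# Drop the 'school_members' index; Elasticsearch raises NotFoundError if it does not exist.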
es.indices.delete(index='school_members')
|
[
"from elasticsearch import Elasticsearch\n\nes = Elasticsearch(hosts='localhost', port=9200)\n\nes.indices.delete(index='school_members')",
"from elasticsearch import Elasticsearch\nes = Elasticsearch(hosts='localhost', port=9200)\nes.indices.delete(index='school_members')\n",
"<import token>\nes = Elasticsearch(hosts='localhost', port=9200)\nes.indices.delete(index='school_members')\n",
"<import token>\n<assignment token>\nes.indices.delete(index='school_members')\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,439 |
3f3b1f2ea718a828015e3fd3c422324a4b506ccc
|
from dogapi import dog_http_api as api
api.api_key = '9775a026f1ca7d1c6c5af9d94d9595a4'
api.application_key = '87ce4a24b5553d2e482ea8a8500e71b8ad4554ff'
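# Note: the keys above are sample values; replace them with valid Datadog credentials.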
# Edit a comment.
comment_id = 123482347822
api.comment('[email protected]', 'I think differently now.', comment_id = 12345)
|
[
"from dogapi import dog_http_api as api\n\napi.api_key = '9775a026f1ca7d1c6c5af9d94d9595a4'\napi.application_key = '87ce4a24b5553d2e482ea8a8500e71b8ad4554ff'\n\n# Edit a comment.\ncomment_id = 123482347822\napi.comment('[email protected]', 'I think differently now.', comment_id = 12345)\n",
"from dogapi import dog_http_api as api\napi.api_key = '9775a026f1ca7d1c6c5af9d94d9595a4'\napi.application_key = '87ce4a24b5553d2e482ea8a8500e71b8ad4554ff'\ncomment_id = 123482347822\napi.comment('[email protected]', 'I think differently now.', comment_id=12345)\n",
"<import token>\napi.api_key = '9775a026f1ca7d1c6c5af9d94d9595a4'\napi.application_key = '87ce4a24b5553d2e482ea8a8500e71b8ad4554ff'\ncomment_id = 123482347822\napi.comment('[email protected]', 'I think differently now.', comment_id=12345)\n",
"<import token>\n<assignment token>\napi.comment('[email protected]', 'I think differently now.', comment_id=12345)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,440 |
8fcd3ef21e713ebf8360b6fd08c727fbfa3ff81e
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 20 19:43:18 2019
@author: E442282
"""
import numpy as np
import cv2
import sys
from matplotlib import pyplot as plt
def getColorSpaces(image):
rgb = cv2.cvtColor(image,cv2.COLOR_RGB2BGR)
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
return rgb,gray
def getImageDimension(image):
    height, width = image.shape[:2]
    return height, width
def showImage(image,title,cmap):
plt.imshow(image,cmap=cmap)
plt.axis('off')
plt.title(title)
def splitRGBChannels(image):
red, green, blue= cv2.split(image)
return red, green, blue
def getBinaryImage(gray,thr=127):
ret,thresh= cv2.threshold(gray,thr,255,cv2.THRESH_BINARY)
return thresh
def getHistogramAdjusted(bgr):
lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
lab_planes = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=2.0,tileGridSize=(8,8))
lab_planes[0] = clahe.apply(lab_planes[0])
lab = cv2.merge(lab_planes)
adjusted = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
return adjusted
def getHSVMask(im):
hMin = 0
sMin = 0
vMin = 220
hMax = 180
sMax = 20
vMax = 255
# Set minimum and max HSV values to display
lower = np.array([hMin, sMin, vMin])
upper = np.array([hMax, sMax, vMax])
# Create HSV Image and threshold into a range.
hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
hsv_mask = cv2.inRange(hsv, lower, upper)
return hsv_mask
img = cv2.imread('output_49.jpg')
adjusted=getHistogramAdjusted(img)
bilateral = cv2.bilateralFilter(adjusted, 7, sigmaSpace=75, sigmaColor=75)
#rgb,gray=getColorSpaces(bilateral)
#mask= getBinaryImage(gray,220)
hsv_mask=getHSVMask(bilateral)
mask= hsv_mask.copy()
plt.axis('off')
plt.imshow(img,cmap='gray')
plt.show()
plt.axis('off')
plt.imshow(mask,cmap='gray')
plt.show()
img2 = img.copy()
output = np.zeros(img.shape, np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
#https://stackoverflow.com/questions/53887425/opencv-grabcut-doesnt-update-mask-when-on-gc-init-with-mask-mode
init_mask = mask.copy()
mask[init_mask == 255] = 1
mask[init_mask == 0] = 2 #Guess everything else is background
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
mask, bgdModel, fgdModel = cv2.grabCut(img2,mask,None,bgdModel,fgdModel,2,cv2.GC_INIT_WITH_MASK)
mask = np.where((mask==2)|(mask==0),0,1).astype('uint8')
mask[mask == 1] = 255
plt.axis('off')
plt.imshow(mask,cmap='gray')
plt.show()
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 20 19:43:18 2019\n\n@author: E442282\n\"\"\"\n\n\nimport numpy as np\nimport cv2 \n\nimport sys\nfrom matplotlib import pyplot as plt\n\n\ndef getColorSpaces(image):\n rgb = cv2.cvtColor(image,cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)\n \n return rgb,gray\n\ndef getImageDimnesion(image):\n height,width = image.shape[:2]\n \n return height,width\n\ndef showImage(image,title,cmap):\n plt.imshow(image,cmap=cmap)\n plt.axis('off')\n plt.title(title)\n\n\ndef splitRGBChannels(image):\n red, green, blue= cv2.split(image)\n \n return red, green, blue\n \ndef getBinaryImage(gray,thr=127):\n ret,thresh= cv2.threshold(gray,thr,255,cv2.THRESH_BINARY)\n return thresh\n \ndef getHistogramAdjusted(bgr):\n lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB) \n lab_planes = cv2.split(lab) \n clahe = cv2.createCLAHE(clipLimit=2.0,tileGridSize=(8,8)) \n lab_planes[0] = clahe.apply(lab_planes[0]) \n lab = cv2.merge(lab_planes) \n adjusted = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR) \n\n return adjusted \n\n\n\ndef getHSVMask(im): \n hMin = 0\n sMin = 0\n vMin = 220\n \n hMax = 180\n sMax = 20\n vMax = 255\n \n # Set minimum and max HSV values to display\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n \n # Create HSV Image and threshold into a range.\n hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n hsv_mask = cv2.inRange(hsv, lower, upper)\n\n return hsv_mask\n\nimg = cv2.imread('output_49.jpg')\n\nadjusted=getHistogramAdjusted(img)\nbilateral = cv2.bilateralFilter(adjusted, 7, sigmaSpace = 75, sigmaColor =75)\n\n\n\n\n#rgb,gray=getColorSpaces(bilateral)\n#mask= getBinaryImage(gray,220)\n\nhsv_mask=getHSVMask(bilateral)\nmask= hsv_mask.copy()\n\nplt.axis('off')\nplt.imshow(img,cmap='gray')\nplt.show()\n\nplt.axis('off')\nplt.imshow(mask,cmap='gray')\nplt.show()\n\nimg2 = img.copy() \n\noutput = np.zeros(img.shape, np.uint8) \nbgdModel = np.zeros((1, 65), np.float64)\nfgdModel = np.zeros((1, 65), np.float64)\n\n#https://stackoverflow.com/questions/53887425/opencv-grabcut-doesnt-update-mask-when-on-gc-init-with-mask-mode\n\ninit_mask = mask.copy()\n\nmask[init_mask == 255] = 1\nmask[init_mask == 0] = 2 #Guess everything else is background\n\nbgdModel = np.zeros((1,65),np.float64)\nfgdModel = np.zeros((1,65),np.float64)\n\nmask, bgdModel, fgdModel = cv2.grabCut(img2,mask,None,bgdModel,fgdModel,2,cv2.GC_INIT_WITH_MASK)\n\nmask = np.where((mask==2)|(mask==0),0,1).astype('uint8')\nmask[mask == 1] = 255\nplt.axis('off')\nplt.imshow(mask,cmap='gray')\nplt.show()\n",
"<docstring token>\nimport numpy as np\nimport cv2\nimport sys\nfrom matplotlib import pyplot as plt\n\n\ndef getColorSpaces(image):\n rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return rgb, gray\n\n\ndef getImageDimnesion(image):\n height, width = image.shape[:2]\n return height, width\n\n\ndef showImage(image, title, cmap):\n plt.imshow(image, cmap=cmap)\n plt.axis('off')\n plt.title(title)\n\n\ndef splitRGBChannels(image):\n red, green, blue = cv2.split(image)\n return red, green, blue\n\n\ndef getBinaryImage(gray, thr=127):\n ret, thresh = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY)\n return thresh\n\n\ndef getHistogramAdjusted(bgr):\n lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)\n lab_planes = cv2.split(lab)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n lab_planes[0] = clahe.apply(lab_planes[0])\n lab = cv2.merge(lab_planes)\n adjusted = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)\n return adjusted\n\n\ndef getHSVMask(im):\n hMin = 0\n sMin = 0\n vMin = 220\n hMax = 180\n sMax = 20\n vMax = 255\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n hsv_mask = cv2.inRange(hsv, lower, upper)\n return hsv_mask\n\n\nimg = cv2.imread('output_49.jpg')\nadjusted = getHistogramAdjusted(img)\nbilateral = cv2.bilateralFilter(adjusted, 7, sigmaSpace=75, sigmaColor=75)\nhsv_mask = getHSVMask(bilateral)\nmask = hsv_mask.copy()\nplt.axis('off')\nplt.imshow(img, cmap='gray')\nplt.show()\nplt.axis('off')\nplt.imshow(mask, cmap='gray')\nplt.show()\nimg2 = img.copy()\noutput = np.zeros(img.shape, np.uint8)\nbgdModel = np.zeros((1, 65), np.float64)\nfgdModel = np.zeros((1, 65), np.float64)\ninit_mask = mask.copy()\nmask[init_mask == 255] = 1\nmask[init_mask == 0] = 2\nbgdModel = np.zeros((1, 65), np.float64)\nfgdModel = np.zeros((1, 65), np.float64)\nmask, bgdModel, fgdModel = cv2.grabCut(img2, mask, None, bgdModel, fgdModel,\n 2, cv2.GC_INIT_WITH_MASK)\nmask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')\nmask[mask == 1] = 255\nplt.axis('off')\nplt.imshow(mask, cmap='gray')\nplt.show()\n",
"<docstring token>\n<import token>\n\n\ndef getColorSpaces(image):\n rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return rgb, gray\n\n\ndef getImageDimnesion(image):\n height, width = image.shape[:2]\n return height, width\n\n\ndef showImage(image, title, cmap):\n plt.imshow(image, cmap=cmap)\n plt.axis('off')\n plt.title(title)\n\n\ndef splitRGBChannels(image):\n red, green, blue = cv2.split(image)\n return red, green, blue\n\n\ndef getBinaryImage(gray, thr=127):\n ret, thresh = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY)\n return thresh\n\n\ndef getHistogramAdjusted(bgr):\n lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)\n lab_planes = cv2.split(lab)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n lab_planes[0] = clahe.apply(lab_planes[0])\n lab = cv2.merge(lab_planes)\n adjusted = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)\n return adjusted\n\n\ndef getHSVMask(im):\n hMin = 0\n sMin = 0\n vMin = 220\n hMax = 180\n sMax = 20\n vMax = 255\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n hsv_mask = cv2.inRange(hsv, lower, upper)\n return hsv_mask\n\n\nimg = cv2.imread('output_49.jpg')\nadjusted = getHistogramAdjusted(img)\nbilateral = cv2.bilateralFilter(adjusted, 7, sigmaSpace=75, sigmaColor=75)\nhsv_mask = getHSVMask(bilateral)\nmask = hsv_mask.copy()\nplt.axis('off')\nplt.imshow(img, cmap='gray')\nplt.show()\nplt.axis('off')\nplt.imshow(mask, cmap='gray')\nplt.show()\nimg2 = img.copy()\noutput = np.zeros(img.shape, np.uint8)\nbgdModel = np.zeros((1, 65), np.float64)\nfgdModel = np.zeros((1, 65), np.float64)\ninit_mask = mask.copy()\nmask[init_mask == 255] = 1\nmask[init_mask == 0] = 2\nbgdModel = np.zeros((1, 65), np.float64)\nfgdModel = np.zeros((1, 65), np.float64)\nmask, bgdModel, fgdModel = cv2.grabCut(img2, mask, None, bgdModel, fgdModel,\n 2, cv2.GC_INIT_WITH_MASK)\nmask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')\nmask[mask == 1] = 255\nplt.axis('off')\nplt.imshow(mask, cmap='gray')\nplt.show()\n",
"<docstring token>\n<import token>\n\n\ndef getColorSpaces(image):\n rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return rgb, gray\n\n\ndef getImageDimnesion(image):\n height, width = image.shape[:2]\n return height, width\n\n\ndef showImage(image, title, cmap):\n plt.imshow(image, cmap=cmap)\n plt.axis('off')\n plt.title(title)\n\n\ndef splitRGBChannels(image):\n red, green, blue = cv2.split(image)\n return red, green, blue\n\n\ndef getBinaryImage(gray, thr=127):\n ret, thresh = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY)\n return thresh\n\n\ndef getHistogramAdjusted(bgr):\n lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)\n lab_planes = cv2.split(lab)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n lab_planes[0] = clahe.apply(lab_planes[0])\n lab = cv2.merge(lab_planes)\n adjusted = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)\n return adjusted\n\n\ndef getHSVMask(im):\n hMin = 0\n sMin = 0\n vMin = 220\n hMax = 180\n sMax = 20\n vMax = 255\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n hsv_mask = cv2.inRange(hsv, lower, upper)\n return hsv_mask\n\n\n<assignment token>\nplt.axis('off')\nplt.imshow(img, cmap='gray')\nplt.show()\nplt.axis('off')\nplt.imshow(mask, cmap='gray')\nplt.show()\n<assignment token>\nplt.axis('off')\nplt.imshow(mask, cmap='gray')\nplt.show()\n",
"<docstring token>\n<import token>\n\n\ndef getColorSpaces(image):\n rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return rgb, gray\n\n\ndef getImageDimnesion(image):\n height, width = image.shape[:2]\n return height, width\n\n\ndef showImage(image, title, cmap):\n plt.imshow(image, cmap=cmap)\n plt.axis('off')\n plt.title(title)\n\n\ndef splitRGBChannels(image):\n red, green, blue = cv2.split(image)\n return red, green, blue\n\n\ndef getBinaryImage(gray, thr=127):\n ret, thresh = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY)\n return thresh\n\n\ndef getHistogramAdjusted(bgr):\n lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)\n lab_planes = cv2.split(lab)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n lab_planes[0] = clahe.apply(lab_planes[0])\n lab = cv2.merge(lab_planes)\n adjusted = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)\n return adjusted\n\n\ndef getHSVMask(im):\n hMin = 0\n sMin = 0\n vMin = 220\n hMax = 180\n sMax = 20\n vMax = 255\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n hsv_mask = cv2.inRange(hsv, lower, upper)\n return hsv_mask\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef getColorSpaces(image):\n rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return rgb, gray\n\n\ndef getImageDimnesion(image):\n height, width = image.shape[:2]\n return height, width\n\n\n<function token>\n\n\ndef splitRGBChannels(image):\n red, green, blue = cv2.split(image)\n return red, green, blue\n\n\ndef getBinaryImage(gray, thr=127):\n ret, thresh = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY)\n return thresh\n\n\ndef getHistogramAdjusted(bgr):\n lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)\n lab_planes = cv2.split(lab)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n lab_planes[0] = clahe.apply(lab_planes[0])\n lab = cv2.merge(lab_planes)\n adjusted = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)\n return adjusted\n\n\ndef getHSVMask(im):\n hMin = 0\n sMin = 0\n vMin = 220\n hMax = 180\n sMax = 20\n vMax = 255\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n hsv_mask = cv2.inRange(hsv, lower, upper)\n return hsv_mask\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef getColorSpaces(image):\n rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return rgb, gray\n\n\ndef getImageDimnesion(image):\n height, width = image.shape[:2]\n return height, width\n\n\n<function token>\n\n\ndef splitRGBChannels(image):\n red, green, blue = cv2.split(image)\n return red, green, blue\n\n\ndef getBinaryImage(gray, thr=127):\n ret, thresh = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY)\n return thresh\n\n\n<function token>\n\n\ndef getHSVMask(im):\n hMin = 0\n sMin = 0\n vMin = 220\n hMax = 180\n sMax = 20\n vMax = 255\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n hsv_mask = cv2.inRange(hsv, lower, upper)\n return hsv_mask\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef getColorSpaces(image):\n rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return rgb, gray\n\n\n<function token>\n<function token>\n\n\ndef splitRGBChannels(image):\n red, green, blue = cv2.split(image)\n return red, green, blue\n\n\ndef getBinaryImage(gray, thr=127):\n ret, thresh = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY)\n return thresh\n\n\n<function token>\n\n\ndef getHSVMask(im):\n hMin = 0\n sMin = 0\n vMin = 220\n hMax = 180\n sMax = 20\n vMax = 255\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n hsv_mask = cv2.inRange(hsv, lower, upper)\n return hsv_mask\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef getColorSpaces(image):\n rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return rgb, gray\n\n\n<function token>\n<function token>\n\n\ndef splitRGBChannels(image):\n red, green, blue = cv2.split(image)\n return red, green, blue\n\n\n<function token>\n<function token>\n\n\ndef getHSVMask(im):\n hMin = 0\n sMin = 0\n vMin = 220\n hMax = 180\n sMax = 20\n vMax = 255\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n hsv_mask = cv2.inRange(hsv, lower, upper)\n return hsv_mask\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef getColorSpaces(image):\n rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return rgb, gray\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getHSVMask(im):\n hMin = 0\n sMin = 0\n vMin = 220\n hMax = 180\n sMax = 20\n vMax = 255\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n hsv_mask = cv2.inRange(hsv, lower, upper)\n return hsv_mask\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef getColorSpaces(image):\n rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return rgb, gray\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
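The script above seeds cv2.grabCut with GC_INIT_WITH_MASK; for contrast, a minimal rectangle-seeded run (hypothetical bounding box, same assumed input file) looks like this:

import cv2
import numpy as np

img = cv2.imread('output_49.jpg')
mask = np.zeros(img.shape[:2], np.uint8)      # filled in by grabCut
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)

rect = (50, 50, 400, 300)  # hypothetical (x, y, w, h) around the foreground
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)

# Definite/probable background (0 and 2) -> 0, everything else -> 255.
result = np.where((mask == 2) | (mask == 0), 0, 255).astype('uint8')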
98,441 |
2e58aaa7158a4735039c2d44dbadf8e0fd936858
|
import functools
from flask import (Blueprint, abort, flash, g, redirect, render_template, request, session, url_for)
from db import get_db
import auth
from auth import login_required
bp = Blueprint('media', __name__)
@bp.route('/')
def index():
db = get_db()
posts = db.execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM post p JOIN user u ON p.author_id = u.id'
' ORDER BY created DESC'
).fetchall()
return render_template('media/index.html', posts=posts)
@bp.route('/<artist>/<album>/<track>')
def display(artist, album, track):
    # generic function for creating the html page for accessing a given track
    db = get_db()
    post = db.execute('SELECT * FROM post p WHERE title = ? AND artist = ?', (track, artist)).fetchone()
    if post is not None:
        if post[2] == 'song':
            return render_template('/media/song.html', post=post)
        elif post[2] == 'movie':
            return render_template('/media/movie.html', post=post)
        elif post[2] == 'television':
            return render_template('/media/tv.html', post=post)
    else:
        return "error"
@bp.route('/create', methods=('GET','POST'))
@login_required
def create():
    # user is attempting to add an item to the collection
    # check that user is logged in, gather properties
    # submit the media object to the DATABASE
if request.method == 'POST':
title = request.form['title']
format = request.form['format']
artist = request.form['artist']
body = None
error = None
        if format == 'song':
            body = request.form['lyrics']
        elif format == 'movie':
            body = request.form['synopsis']
        elif format == 'television':
            body = request.form['screenplay']
if not title:
error = 'title error'
if not body:
error = 'no body'
if not format:
error = 'no format'
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'INSERT INTO post (title, artist, format, body, author_id)'
' VALUES (?, ?, ?, ?, ?)',
(title, artist, format, body, g.user['id'])
)
db.commit()
return redirect(url_for('media.index'))
return render_template('media/create.html')
def get_media(id, check_author=True):
    post = get_db().execute('SELECT p.id, title, body, created, author_id, username'
                            ' FROM post p'
                            ' WHERE p.id = ?',
                            (id,)
                            ).fetchone()
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
if check_author and post['author_id'] != g.user['id']:
abort(403)
return post
|
[
"import functools\nfrom flask import ( Blueprint, flash, g, redirect, render_template,request,session,url_for)\n\nfrom db import get_db\nimport auth\nfrom auth import login_required\nbp = Blueprint('media', __name__)\[email protected]('/')\ndef index():\n db = get_db()\n posts = db.execute(\n 'SELECT p.id, title, body, created, author_id, username'\n ' FROM post p JOIN user u ON p.author_id = u.id'\n ' ORDER BY created DESC'\n ).fetchall()\n return render_template('media/index.html', posts=posts)\[email protected]('/<artist>/<album>/<track>')\ndef display(artist, album, track):\n #generic function for creating the html page for accessihng a given track\n db = get_db();\n post = db.execute('SELECT * FROM post p WHERE title = ? AND artist = ?', (artist,track,) ).fetchone()\n if(post is not None):\n if(post[2] == 'song'):\n return render_template('/media/song.html', post = post)\n elif(post[2] == 'movie'):\n return render_template('/media/movie.html', post = post)\n elif(post[2] == 'television'):\n return render_template('/media/tv.html', post = post)\n else:\n return \"error\"\n\n\[email protected]('/create', methods=('GET','POST'))\n@login_required\ndef create():\n #user is attemting to add an item to the collection\n #check that user is logged in, gather properties\n #submit the media object to the DATABASE\n if request.method == 'POST':\n title = request.form['title']\n format = request.form['format']\n artist = request.form['artist']\n body = None\n error = None\n if format == 'song':\n body = request['lyrics']\n elif format == 'movie':\n body = request['synopsis']\n elif format == 'television':\n body = request['screenplay']\n if not title:\n error = 'title error'\n if not body:\n error = 'no body'\n if not format:\n error = 'no format'\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'INSERT INTO post (title, artist, format, body, author_id)'\n ' VALUES (?, ?, ?, ?, ?)',\n (title, artist, format, body, g.user['id'])\n )\n db.commit()\n return redirect(url_for('media.index'))\n\n return render_template('media/create.html')\ndef get_media(id, check_author=True):\n post = get_db.execute('SELECT p.id, title, body, created, author_id, username'\n ' FROM post p'\n ' WHERE p.id = ?',\n (id,)\n ).fetchone()\n if post is None:\n abort(404, \"Post id {0} doesn't exist.\".format(id))\n\n if check_author and post['author_id'] != g.user['id']:\n abort(403)\n\n return post\n",
"import functools\nfrom flask import Blueprint, flash, g, redirect, render_template, request, session, url_for\nfrom db import get_db\nimport auth\nfrom auth import login_required\nbp = Blueprint('media', __name__)\n\n\[email protected]('/')\ndef index():\n db = get_db()\n posts = db.execute(\n 'SELECT p.id, title, body, created, author_id, username FROM post p JOIN user u ON p.author_id = u.id ORDER BY created DESC'\n ).fetchall()\n return render_template('media/index.html', posts=posts)\n\n\[email protected]('/<artist>/<album>/<track>')\ndef display(artist, album, track):\n db = get_db()\n post = db.execute('SELECT * FROM post p WHERE title = ? AND artist = ?',\n (artist, track)).fetchone()\n if post is not None:\n if post[2] == 'song':\n return render_template('/media/song.html', post=post)\n elif post[2] == 'movie':\n return render_template('/media/movie.html', post=post)\n elif post[2] == 'television':\n return render_template('/media/tv.html', post=post)\n else:\n return 'error'\n\n\[email protected]('/create', methods=('GET', 'POST'))\n@login_required\ndef create():\n if request.method == 'POST':\n title = request.form['title']\n format = request.form['format']\n artist = request.form['artist']\n body = None\n error = None\n if format == 'song':\n body = request['lyrics']\n elif format == 'movie':\n body = request['synopsis']\n elif format == 'television':\n body = request['screenplay']\n if not title:\n error = 'title error'\n if not body:\n error = 'no body'\n if not format:\n error = 'no format'\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'INSERT INTO post (title, artist, format, body, author_id) VALUES (?, ?, ?, ?, ?)'\n , (title, artist, format, body, g.user['id']))\n db.commit()\n return redirect(url_for('media.index'))\n return render_template('media/create.html')\n\n\ndef get_media(id, check_author=True):\n post = get_db.execute(\n 'SELECT p.id, title, body, created, author_id, username FROM post p WHERE p.id = ?'\n , (id,)).fetchone()\n if post is None:\n abort(404, \"Post id {0} doesn't exist.\".format(id))\n if check_author and post['author_id'] != g.user['id']:\n abort(403)\n return post\n",
"<import token>\nbp = Blueprint('media', __name__)\n\n\[email protected]('/')\ndef index():\n db = get_db()\n posts = db.execute(\n 'SELECT p.id, title, body, created, author_id, username FROM post p JOIN user u ON p.author_id = u.id ORDER BY created DESC'\n ).fetchall()\n return render_template('media/index.html', posts=posts)\n\n\[email protected]('/<artist>/<album>/<track>')\ndef display(artist, album, track):\n db = get_db()\n post = db.execute('SELECT * FROM post p WHERE title = ? AND artist = ?',\n (artist, track)).fetchone()\n if post is not None:\n if post[2] == 'song':\n return render_template('/media/song.html', post=post)\n elif post[2] == 'movie':\n return render_template('/media/movie.html', post=post)\n elif post[2] == 'television':\n return render_template('/media/tv.html', post=post)\n else:\n return 'error'\n\n\[email protected]('/create', methods=('GET', 'POST'))\n@login_required\ndef create():\n if request.method == 'POST':\n title = request.form['title']\n format = request.form['format']\n artist = request.form['artist']\n body = None\n error = None\n if format == 'song':\n body = request['lyrics']\n elif format == 'movie':\n body = request['synopsis']\n elif format == 'television':\n body = request['screenplay']\n if not title:\n error = 'title error'\n if not body:\n error = 'no body'\n if not format:\n error = 'no format'\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'INSERT INTO post (title, artist, format, body, author_id) VALUES (?, ?, ?, ?, ?)'\n , (title, artist, format, body, g.user['id']))\n db.commit()\n return redirect(url_for('media.index'))\n return render_template('media/create.html')\n\n\ndef get_media(id, check_author=True):\n post = get_db.execute(\n 'SELECT p.id, title, body, created, author_id, username FROM post p WHERE p.id = ?'\n , (id,)).fetchone()\n if post is None:\n abort(404, \"Post id {0} doesn't exist.\".format(id))\n if check_author and post['author_id'] != g.user['id']:\n abort(403)\n return post\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef index():\n db = get_db()\n posts = db.execute(\n 'SELECT p.id, title, body, created, author_id, username FROM post p JOIN user u ON p.author_id = u.id ORDER BY created DESC'\n ).fetchall()\n return render_template('media/index.html', posts=posts)\n\n\[email protected]('/<artist>/<album>/<track>')\ndef display(artist, album, track):\n db = get_db()\n post = db.execute('SELECT * FROM post p WHERE title = ? AND artist = ?',\n (artist, track)).fetchone()\n if post is not None:\n if post[2] == 'song':\n return render_template('/media/song.html', post=post)\n elif post[2] == 'movie':\n return render_template('/media/movie.html', post=post)\n elif post[2] == 'television':\n return render_template('/media/tv.html', post=post)\n else:\n return 'error'\n\n\[email protected]('/create', methods=('GET', 'POST'))\n@login_required\ndef create():\n if request.method == 'POST':\n title = request.form['title']\n format = request.form['format']\n artist = request.form['artist']\n body = None\n error = None\n if format == 'song':\n body = request['lyrics']\n elif format == 'movie':\n body = request['synopsis']\n elif format == 'television':\n body = request['screenplay']\n if not title:\n error = 'title error'\n if not body:\n error = 'no body'\n if not format:\n error = 'no format'\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'INSERT INTO post (title, artist, format, body, author_id) VALUES (?, ?, ?, ?, ?)'\n , (title, artist, format, body, g.user['id']))\n db.commit()\n return redirect(url_for('media.index'))\n return render_template('media/create.html')\n\n\ndef get_media(id, check_author=True):\n post = get_db.execute(\n 'SELECT p.id, title, body, created, author_id, username FROM post p WHERE p.id = ?'\n , (id,)).fetchone()\n if post is None:\n abort(404, \"Post id {0} doesn't exist.\".format(id))\n if check_author and post['author_id'] != g.user['id']:\n abort(403)\n return post\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef index():\n db = get_db()\n posts = db.execute(\n 'SELECT p.id, title, body, created, author_id, username FROM post p JOIN user u ON p.author_id = u.id ORDER BY created DESC'\n ).fetchall()\n return render_template('media/index.html', posts=posts)\n\n\[email protected]('/<artist>/<album>/<track>')\ndef display(artist, album, track):\n db = get_db()\n post = db.execute('SELECT * FROM post p WHERE title = ? AND artist = ?',\n (artist, track)).fetchone()\n if post is not None:\n if post[2] == 'song':\n return render_template('/media/song.html', post=post)\n elif post[2] == 'movie':\n return render_template('/media/movie.html', post=post)\n elif post[2] == 'television':\n return render_template('/media/tv.html', post=post)\n else:\n return 'error'\n\n\n<function token>\n\n\ndef get_media(id, check_author=True):\n post = get_db.execute(\n 'SELECT p.id, title, body, created, author_id, username FROM post p WHERE p.id = ?'\n , (id,)).fetchone()\n if post is None:\n abort(404, \"Post id {0} doesn't exist.\".format(id))\n if check_author and post['author_id'] != g.user['id']:\n abort(403)\n return post\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef index():\n db = get_db()\n posts = db.execute(\n 'SELECT p.id, title, body, created, author_id, username FROM post p JOIN user u ON p.author_id = u.id ORDER BY created DESC'\n ).fetchall()\n return render_template('media/index.html', posts=posts)\n\n\[email protected]('/<artist>/<album>/<track>')\ndef display(artist, album, track):\n db = get_db()\n post = db.execute('SELECT * FROM post p WHERE title = ? AND artist = ?',\n (artist, track)).fetchone()\n if post is not None:\n if post[2] == 'song':\n return render_template('/media/song.html', post=post)\n elif post[2] == 'movie':\n return render_template('/media/movie.html', post=post)\n elif post[2] == 'television':\n return render_template('/media/tv.html', post=post)\n else:\n return 'error'\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\[email protected]('/<artist>/<album>/<track>')\ndef display(artist, album, track):\n db = get_db()\n post = db.execute('SELECT * FROM post p WHERE title = ? AND artist = ?',\n (artist, track)).fetchone()\n if post is not None:\n if post[2] == 'song':\n return render_template('/media/song.html', post=post)\n elif post[2] == 'movie':\n return render_template('/media/movie.html', post=post)\n elif post[2] == 'television':\n return render_template('/media/tv.html', post=post)\n else:\n return 'error'\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
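The blueprint imports login_required from a local auth module that is not shown; a minimal sketch of such a decorator (assuming g.user is populated by a before_app_request hook, as in the Flask tutorial this code follows):

import functools
from flask import g, redirect, url_for

def login_required(view):
    # Send anonymous users to the login page; otherwise run the view as-is.
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        if g.user is None:
            return redirect(url_for('auth.login'))
        return view(**kwargs)
    return wrapped_view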
98,442 |
70ed45656caa91d10a8816e1db8af24461be49fb
|
def gcd(m, n):
    # Euclid's algorithm: keep taking remainders until one divides the other.
    x = max(m, n)
    y = min(m, n)
    while x % y != 0:
        x, y = y, x % y
    return y

def lcm(m, n):
    # lcm * gcd == m * n; floor division keeps the result an exact int.
    return m * n // gcd(m, n)

A, B = map(int, input().split())
print(lcm(A, B))
|
[
"def gcd(m,n):\n x = max(m,n)\n y = min(m,n)\n while x%y != 0:\n z = x%y\n x = y\n y = z\n else:\n return y\ndef lcm(m,n):\n return int(m * n /gcd(m,n))\n\nA,B = map( int ,input().split())\nprint(lcm(A,B))",
"def gcd(m, n):\n x = max(m, n)\n y = min(m, n)\n while x % y != 0:\n z = x % y\n x = y\n y = z\n else:\n return y\n\n\ndef lcm(m, n):\n return int(m * n / gcd(m, n))\n\n\nA, B = map(int, input().split())\nprint(lcm(A, B))\n",
"def gcd(m, n):\n x = max(m, n)\n y = min(m, n)\n while x % y != 0:\n z = x % y\n x = y\n y = z\n else:\n return y\n\n\ndef lcm(m, n):\n return int(m * n / gcd(m, n))\n\n\n<assignment token>\nprint(lcm(A, B))\n",
"def gcd(m, n):\n x = max(m, n)\n y = min(m, n)\n while x % y != 0:\n z = x % y\n x = y\n y = z\n else:\n return y\n\n\ndef lcm(m, n):\n return int(m * n / gcd(m, n))\n\n\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef lcm(m, n):\n return int(m * n / gcd(m, n))\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
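Since Python 3.5 the standard library ships the gcd, so an equivalent program with no hand-rolled loop is simply:

import math

A, B = map(int, input().split())
print(A * B // math.gcd(A, B))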
98,443 |
77d852642004a8d05852fca2e93d7602d217169c
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 26 11:07:23 2018
@author: ayang
"""
import argparse
import json
import sys
import keras.models
import numpy as np
import pandas as pd
DIVERSITY_SAMPLES = [0.2, 0.5, 1.0, 1.2]
SEED_LENGTH = 40
SEED_CHARSET = list('abcdefghijklmnopqrstuvwxyz')
def _setup_args():
parser = argparse.ArgumentParser()
parser.add_argument('--role', default='rory', help='Role (Rory or Lorelai)', required=True)
parser.add_argument('--seed', help='Random seed', required=True)
parser.add_argument('--diversity', nargs='+', default=DIVERSITY_SAMPLES, help='Diversity')
parser.add_argument('--output-length', default=400, type=int, help='Output sentence length')
return parser.parse_args()
def _sample(preds, temperature=1.0):
"""
helper function to sample an index from a probability array
"""
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
def _load_model(role):
model_file = "weights_{}.hdf5".format(role)
return keras.models.load_model(model_file)
def _load_lines(role):
data = []
with open('../scrape/script.jl') as f:
for line in f:
data.append(json.loads(line))
role_subset = [item for item in data if item['actor'].lower() == role]
role_dataframe = pd.DataFrame(role_subset)
role_line = ' '.join(map(str, role_dataframe.line)).lower()
chars = sorted(list(set(role_line)))
return chars
def _main():
args = _setup_args()
    role = args.role.lower()
if role not in ['rory', 'lorelai']:
raise ValueError('Expected Rory or Lorelai')
model = _load_model(role)
chars = _load_lines(role)
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
seed = args.seed
if len(seed) > SEED_LENGTH:
seed = seed[0:SEED_LENGTH]
elif len(seed) < SEED_LENGTH:
        raise ValueError('Seed needs at least {} characters.'.format(SEED_LENGTH))
seed = seed.lower()
div = [float(value) for value in args.diversity]
result = {
'role': role,
'seed': seed,
'diversity': div,
'result': {}
}
for diversity in div:
generated = ''
sentence = seed
for i in range(args.output_length):
x_pred = np.zeros((1, SEED_LENGTH, len(chars)))
for t, char in enumerate(sentence):
x_pred[0, t, char_indices[char]] = 1.
preds = model.predict(x_pred, verbose=0)[0]
next_index = _sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
result['result'][diversity] = seed + generated
print(json.dumps(result))
if __name__ == '__main__':
_main()
|
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 26 11:07:23 2018\n\n@author: ayang\n\"\"\"\n\nimport argparse\nimport json\nimport sys\n\nimport keras.models\nimport numpy as np\nimport pandas as pd\n\nDIVERSITY_SAMPLES = [0.2, 0.5, 1.0, 1.2]\nSEED_LENGTH = 40\nSEED_CHARSET = list('abcdefghijklmnopqrstuvwxyz')\n\n\ndef _setup_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--role', default='rory', help='Role (Rory or Lorelai)', required=True)\n parser.add_argument('--seed', help='Random seed', required=True)\n parser.add_argument('--diversity', nargs='+', default=DIVERSITY_SAMPLES, help='Diversity')\n parser.add_argument('--output-length', default=400, type=int, help='Output sentence length')\n\n return parser.parse_args()\n\n\ndef _sample(preds, temperature=1.0):\n \"\"\"\n helper function to sample an index from a probability array\n \"\"\"\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\ndef _load_model(role):\n model_file = \"weights_{}.hdf5\".format(role)\n return keras.models.load_model(model_file)\n\n\ndef _load_lines(role):\n data = []\n with open('../scrape/script.jl') as f:\n for line in f:\n data.append(json.loads(line))\n\n role_subset = [item for item in data if item['actor'].lower() == role]\n role_dataframe = pd.DataFrame(role_subset)\n\n role_line = ' '.join(map(str, role_dataframe.line)).lower()\n chars = sorted(list(set(role_line)))\n return chars\n\n\ndef _main():\n args = _setup_args()\n\n role = (args.role).lower()\n if role not in ['rory', 'lorelai']:\n raise ValueError('Expected Rory or Lorelai')\n model = _load_model(role)\n chars = _load_lines(role)\n char_indices = dict((c, i) for i, c in enumerate(chars))\n indices_char = dict((i, c) for i, c in enumerate(chars))\n\n seed = args.seed\n if len(seed) > SEED_LENGTH:\n seed = seed[0:SEED_LENGTH]\n elif len(seed) < SEED_LENGTH:\n raise ValueError('Seed needs to have {} characters.'.format(SEED_LENGTH))\n\n seed = seed.lower()\n\n div = [float(value) for value in args.diversity]\n\n result = {\n 'role': role,\n 'seed': seed,\n 'diversity': div,\n 'result': {}\n }\n\n for diversity in div:\n generated = ''\n sentence = seed\n\n for i in range(args.output_length):\n x_pred = np.zeros((1, SEED_LENGTH, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, char_indices[char]] = 1.\n\n preds = model.predict(x_pred, verbose=0)[0]\n next_index = _sample(preds, diversity)\n next_char = indices_char[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n\n result['result'][diversity] = seed + generated\n\n print(json.dumps(result))\n\n\nif __name__ == '__main__':\n _main()\n",
"<docstring token>\nimport argparse\nimport json\nimport sys\nimport keras.models\nimport numpy as np\nimport pandas as pd\nDIVERSITY_SAMPLES = [0.2, 0.5, 1.0, 1.2]\nSEED_LENGTH = 40\nSEED_CHARSET = list('abcdefghijklmnopqrstuvwxyz')\n\n\ndef _setup_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--role', default='rory', help=\n 'Role (Rory or Lorelai)', required=True)\n parser.add_argument('--seed', help='Random seed', required=True)\n parser.add_argument('--diversity', nargs='+', default=DIVERSITY_SAMPLES,\n help='Diversity')\n parser.add_argument('--output-length', default=400, type=int, help=\n 'Output sentence length')\n return parser.parse_args()\n\n\ndef _sample(preds, temperature=1.0):\n \"\"\"\n helper function to sample an index from a probability array\n \"\"\"\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\ndef _load_model(role):\n model_file = 'weights_{}.hdf5'.format(role)\n return keras.models.load_model(model_file)\n\n\ndef _load_lines(role):\n data = []\n with open('../scrape/script.jl') as f:\n for line in f:\n data.append(json.loads(line))\n role_subset = [item for item in data if item['actor'].lower() == role]\n role_dataframe = pd.DataFrame(role_subset)\n role_line = ' '.join(map(str, role_dataframe.line)).lower()\n chars = sorted(list(set(role_line)))\n return chars\n\n\ndef _main():\n args = _setup_args()\n role = args.role.lower()\n if role not in ['rory', 'lorelai']:\n raise ValueError('Expected Rory or Lorelai')\n model = _load_model(role)\n chars = _load_lines(role)\n char_indices = dict((c, i) for i, c in enumerate(chars))\n indices_char = dict((i, c) for i, c in enumerate(chars))\n seed = args.seed\n if len(seed) > SEED_LENGTH:\n seed = seed[0:SEED_LENGTH]\n elif len(seed) < SEED_LENGTH:\n raise ValueError('Seed needs to have {} characters.'.format(\n SEED_LENGTH))\n seed = seed.lower()\n div = [float(value) for value in args.diversity]\n result = {'role': role, 'seed': seed, 'diversity': div, 'result': {}}\n for diversity in div:\n generated = ''\n sentence = seed\n for i in range(args.output_length):\n x_pred = np.zeros((1, SEED_LENGTH, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, char_indices[char]] = 1.0\n preds = model.predict(x_pred, verbose=0)[0]\n next_index = _sample(preds, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n sentence = sentence[1:] + next_char\n result['result'][diversity] = seed + generated\n print(json.dumps(result))\n\n\nif __name__ == '__main__':\n _main()\n",
"<docstring token>\n<import token>\nDIVERSITY_SAMPLES = [0.2, 0.5, 1.0, 1.2]\nSEED_LENGTH = 40\nSEED_CHARSET = list('abcdefghijklmnopqrstuvwxyz')\n\n\ndef _setup_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--role', default='rory', help=\n 'Role (Rory or Lorelai)', required=True)\n parser.add_argument('--seed', help='Random seed', required=True)\n parser.add_argument('--diversity', nargs='+', default=DIVERSITY_SAMPLES,\n help='Diversity')\n parser.add_argument('--output-length', default=400, type=int, help=\n 'Output sentence length')\n return parser.parse_args()\n\n\ndef _sample(preds, temperature=1.0):\n \"\"\"\n helper function to sample an index from a probability array\n \"\"\"\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\ndef _load_model(role):\n model_file = 'weights_{}.hdf5'.format(role)\n return keras.models.load_model(model_file)\n\n\ndef _load_lines(role):\n data = []\n with open('../scrape/script.jl') as f:\n for line in f:\n data.append(json.loads(line))\n role_subset = [item for item in data if item['actor'].lower() == role]\n role_dataframe = pd.DataFrame(role_subset)\n role_line = ' '.join(map(str, role_dataframe.line)).lower()\n chars = sorted(list(set(role_line)))\n return chars\n\n\ndef _main():\n args = _setup_args()\n role = args.role.lower()\n if role not in ['rory', 'lorelai']:\n raise ValueError('Expected Rory or Lorelai')\n model = _load_model(role)\n chars = _load_lines(role)\n char_indices = dict((c, i) for i, c in enumerate(chars))\n indices_char = dict((i, c) for i, c in enumerate(chars))\n seed = args.seed\n if len(seed) > SEED_LENGTH:\n seed = seed[0:SEED_LENGTH]\n elif len(seed) < SEED_LENGTH:\n raise ValueError('Seed needs to have {} characters.'.format(\n SEED_LENGTH))\n seed = seed.lower()\n div = [float(value) for value in args.diversity]\n result = {'role': role, 'seed': seed, 'diversity': div, 'result': {}}\n for diversity in div:\n generated = ''\n sentence = seed\n for i in range(args.output_length):\n x_pred = np.zeros((1, SEED_LENGTH, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, char_indices[char]] = 1.0\n preds = model.predict(x_pred, verbose=0)[0]\n next_index = _sample(preds, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n sentence = sentence[1:] + next_char\n result['result'][diversity] = seed + generated\n print(json.dumps(result))\n\n\nif __name__ == '__main__':\n _main()\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef _setup_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--role', default='rory', help=\n 'Role (Rory or Lorelai)', required=True)\n parser.add_argument('--seed', help='Random seed', required=True)\n parser.add_argument('--diversity', nargs='+', default=DIVERSITY_SAMPLES,\n help='Diversity')\n parser.add_argument('--output-length', default=400, type=int, help=\n 'Output sentence length')\n return parser.parse_args()\n\n\ndef _sample(preds, temperature=1.0):\n \"\"\"\n helper function to sample an index from a probability array\n \"\"\"\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\ndef _load_model(role):\n model_file = 'weights_{}.hdf5'.format(role)\n return keras.models.load_model(model_file)\n\n\ndef _load_lines(role):\n data = []\n with open('../scrape/script.jl') as f:\n for line in f:\n data.append(json.loads(line))\n role_subset = [item for item in data if item['actor'].lower() == role]\n role_dataframe = pd.DataFrame(role_subset)\n role_line = ' '.join(map(str, role_dataframe.line)).lower()\n chars = sorted(list(set(role_line)))\n return chars\n\n\ndef _main():\n args = _setup_args()\n role = args.role.lower()\n if role not in ['rory', 'lorelai']:\n raise ValueError('Expected Rory or Lorelai')\n model = _load_model(role)\n chars = _load_lines(role)\n char_indices = dict((c, i) for i, c in enumerate(chars))\n indices_char = dict((i, c) for i, c in enumerate(chars))\n seed = args.seed\n if len(seed) > SEED_LENGTH:\n seed = seed[0:SEED_LENGTH]\n elif len(seed) < SEED_LENGTH:\n raise ValueError('Seed needs to have {} characters.'.format(\n SEED_LENGTH))\n seed = seed.lower()\n div = [float(value) for value in args.diversity]\n result = {'role': role, 'seed': seed, 'diversity': div, 'result': {}}\n for diversity in div:\n generated = ''\n sentence = seed\n for i in range(args.output_length):\n x_pred = np.zeros((1, SEED_LENGTH, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, char_indices[char]] = 1.0\n preds = model.predict(x_pred, verbose=0)[0]\n next_index = _sample(preds, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n sentence = sentence[1:] + next_char\n result['result'][diversity] = seed + generated\n print(json.dumps(result))\n\n\nif __name__ == '__main__':\n _main()\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef _setup_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--role', default='rory', help=\n 'Role (Rory or Lorelai)', required=True)\n parser.add_argument('--seed', help='Random seed', required=True)\n parser.add_argument('--diversity', nargs='+', default=DIVERSITY_SAMPLES,\n help='Diversity')\n parser.add_argument('--output-length', default=400, type=int, help=\n 'Output sentence length')\n return parser.parse_args()\n\n\ndef _sample(preds, temperature=1.0):\n \"\"\"\n helper function to sample an index from a probability array\n \"\"\"\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\ndef _load_model(role):\n model_file = 'weights_{}.hdf5'.format(role)\n return keras.models.load_model(model_file)\n\n\ndef _load_lines(role):\n data = []\n with open('../scrape/script.jl') as f:\n for line in f:\n data.append(json.loads(line))\n role_subset = [item for item in data if item['actor'].lower() == role]\n role_dataframe = pd.DataFrame(role_subset)\n role_line = ' '.join(map(str, role_dataframe.line)).lower()\n chars = sorted(list(set(role_line)))\n return chars\n\n\ndef _main():\n args = _setup_args()\n role = args.role.lower()\n if role not in ['rory', 'lorelai']:\n raise ValueError('Expected Rory or Lorelai')\n model = _load_model(role)\n chars = _load_lines(role)\n char_indices = dict((c, i) for i, c in enumerate(chars))\n indices_char = dict((i, c) for i, c in enumerate(chars))\n seed = args.seed\n if len(seed) > SEED_LENGTH:\n seed = seed[0:SEED_LENGTH]\n elif len(seed) < SEED_LENGTH:\n raise ValueError('Seed needs to have {} characters.'.format(\n SEED_LENGTH))\n seed = seed.lower()\n div = [float(value) for value in args.diversity]\n result = {'role': role, 'seed': seed, 'diversity': div, 'result': {}}\n for diversity in div:\n generated = ''\n sentence = seed\n for i in range(args.output_length):\n x_pred = np.zeros((1, SEED_LENGTH, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, char_indices[char]] = 1.0\n preds = model.predict(x_pred, verbose=0)[0]\n next_index = _sample(preds, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n sentence = sentence[1:] + next_char\n result['result'][diversity] = seed + generated\n print(json.dumps(result))\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef _setup_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--role', default='rory', help=\n 'Role (Rory or Lorelai)', required=True)\n parser.add_argument('--seed', help='Random seed', required=True)\n parser.add_argument('--diversity', nargs='+', default=DIVERSITY_SAMPLES,\n help='Diversity')\n parser.add_argument('--output-length', default=400, type=int, help=\n 'Output sentence length')\n return parser.parse_args()\n\n\ndef _sample(preds, temperature=1.0):\n \"\"\"\n helper function to sample an index from a probability array\n \"\"\"\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\n<function token>\n\n\ndef _load_lines(role):\n data = []\n with open('../scrape/script.jl') as f:\n for line in f:\n data.append(json.loads(line))\n role_subset = [item for item in data if item['actor'].lower() == role]\n role_dataframe = pd.DataFrame(role_subset)\n role_line = ' '.join(map(str, role_dataframe.line)).lower()\n chars = sorted(list(set(role_line)))\n return chars\n\n\ndef _main():\n args = _setup_args()\n role = args.role.lower()\n if role not in ['rory', 'lorelai']:\n raise ValueError('Expected Rory or Lorelai')\n model = _load_model(role)\n chars = _load_lines(role)\n char_indices = dict((c, i) for i, c in enumerate(chars))\n indices_char = dict((i, c) for i, c in enumerate(chars))\n seed = args.seed\n if len(seed) > SEED_LENGTH:\n seed = seed[0:SEED_LENGTH]\n elif len(seed) < SEED_LENGTH:\n raise ValueError('Seed needs to have {} characters.'.format(\n SEED_LENGTH))\n seed = seed.lower()\n div = [float(value) for value in args.diversity]\n result = {'role': role, 'seed': seed, 'diversity': div, 'result': {}}\n for diversity in div:\n generated = ''\n sentence = seed\n for i in range(args.output_length):\n x_pred = np.zeros((1, SEED_LENGTH, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, char_indices[char]] = 1.0\n preds = model.predict(x_pred, verbose=0)[0]\n next_index = _sample(preds, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n sentence = sentence[1:] + next_char\n result['result'][diversity] = seed + generated\n print(json.dumps(result))\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef _setup_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--role', default='rory', help=\n 'Role (Rory or Lorelai)', required=True)\n parser.add_argument('--seed', help='Random seed', required=True)\n parser.add_argument('--diversity', nargs='+', default=DIVERSITY_SAMPLES,\n help='Diversity')\n parser.add_argument('--output-length', default=400, type=int, help=\n 'Output sentence length')\n return parser.parse_args()\n\n\ndef _sample(preds, temperature=1.0):\n \"\"\"\n helper function to sample an index from a probability array\n \"\"\"\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\n<function token>\n\n\ndef _load_lines(role):\n data = []\n with open('../scrape/script.jl') as f:\n for line in f:\n data.append(json.loads(line))\n role_subset = [item for item in data if item['actor'].lower() == role]\n role_dataframe = pd.DataFrame(role_subset)\n role_line = ' '.join(map(str, role_dataframe.line)).lower()\n chars = sorted(list(set(role_line)))\n return chars\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef _setup_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--role', default='rory', help=\n 'Role (Rory or Lorelai)', required=True)\n parser.add_argument('--seed', help='Random seed', required=True)\n parser.add_argument('--diversity', nargs='+', default=DIVERSITY_SAMPLES,\n help='Diversity')\n parser.add_argument('--output-length', default=400, type=int, help=\n 'Output sentence length')\n return parser.parse_args()\n\n\ndef _sample(preds, temperature=1.0):\n \"\"\"\n helper function to sample an index from a probability array\n \"\"\"\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef _sample(preds, temperature=1.0):\n \"\"\"\n helper function to sample an index from a probability array\n \"\"\"\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
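The _sample helper in this entry stands alone; a NumPy-only demonstration of how temperature reshapes a (made-up) distribution:

import numpy as np

def sample(preds, temperature=1.0):
    # Rescale log-probabilities by temperature, renormalise, draw one index.
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    preds = np.exp(preds) / np.sum(np.exp(preds))
    return np.argmax(np.random.multinomial(1, preds, 1))

probs = [0.7, 0.2, 0.1]      # hypothetical next-character probabilities
print(sample(probs, 0.2))    # low temperature: almost always index 0
print(sample(probs, 1.2))    # high temperature: noticeably more varied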
98,444 |
f707e3bd3e33b93a23426c7d27159583404da766
|
""" Create CW Dashboards for all MWAA environments """
import datetime
import json
import os
import boto3
from aws_lambda_powertools import Logger, Metrics, Tracer
logger = Logger()
tracer = Tracer()
metrics = Metrics()
cloudwatch = boto3.client("cloudwatch")
mwaa = boto3.client("mwaa")
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.environ["DASHBOARD_TEMPLATE_TABLE"])
response = table.get_item(Key={"id": "1"})
dashboard_template = response["Item"]["data"]
def default(o):
if isinstance(o, (datetime.date, datetime.datetime)):
return o.isoformat()
@metrics.log_metrics(capture_cold_start_metric=True)
@logger.inject_lambda_context(log_event=True)
@tracer.capture_lambda_handler
def lambda_handler(event, context):
logger.info(json.dumps(event, indent=2, default=default))
mwaa_environments = mwaa.list_environments()["Environments"]
logger.info(f"Airflow environments: {json.dumps(mwaa_environments, indent=2)}")
response = cloudwatch.list_dashboards(DashboardNamePrefix="Airflow-")
logger.info(json.dumps(response, indent=2, default=default))
# Create or update dashboards for new/existing environments
for env in mwaa_environments:
dashboard_name = f"Airflow-{env}"
dashboard_body = dashboard_template.replace(
"${AWS::Region}", os.getenv("AWS_REGION", "us-east-1")
).replace("${EnvironmentName}", env)
logger.info(f"Creating/updating dashboard: {dashboard_name}")
logger.debug(dashboard_body)
response = cloudwatch.put_dashboard(
DashboardName=dashboard_name, DashboardBody=dashboard_body
)
logger.info(json.dumps(response, indent=2))
|
[
"\"\"\" Create CW Dashboards for all MWAA environments \"\"\"\n\nimport datetime\nimport json\nimport os\n\nimport boto3\nfrom aws_lambda_powertools import Logger, Metrics, Tracer\n\nlogger = Logger()\ntracer = Tracer()\nmetrics = Metrics()\n\ncloudwatch = boto3.client(\"cloudwatch\")\nmwaa = boto3.client(\"mwaa\")\n\ndynamodb = boto3.resource(\"dynamodb\")\ntable = dynamodb.Table(os.environ[\"DASHBOARD_TEMPLATE_TABLE\"])\nresponse = table.get_item(Key={\"id\": \"1\"})\ndashboard_template = response[\"Item\"][\"data\"]\n\n\ndef default(o):\n if isinstance(o, (datetime.date, datetime.datetime)):\n return o.isoformat()\n\n\[email protected]_metrics(capture_cold_start_metric=True)\[email protected]_lambda_context(log_event=True)\[email protected]_lambda_handler\ndef lambda_handler(event, context):\n\n logger.info(json.dumps(event, indent=2, default=default))\n\n mwaa_environments = mwaa.list_environments()[\"Environments\"]\n\n logger.info(f\"Airflow environments: {json.dumps(mwaa_environments, indent=2)}\")\n\n response = cloudwatch.list_dashboards(DashboardNamePrefix=\"Airflow-\")\n logger.info(json.dumps(response, indent=2, default=default))\n\n # Create or update dashboards for new/existing environments\n for env in mwaa_environments:\n\n dashboard_name = f\"Airflow-{env}\"\n\n dashboard_body = dashboard_template.replace(\n \"${AWS::Region}\", os.getenv(\"AWS_REGION\", \"us-east-1\")\n ).replace(\"${EnvironmentName}\", env)\n\n logger.info(f\"Creating/updating dashboard: {dashboard_name}\")\n logger.debug(dashboard_body)\n\n response = cloudwatch.put_dashboard(\n DashboardName=dashboard_name, DashboardBody=dashboard_body\n )\n\n logger.info(json.dumps(response, indent=2))\n",
"<docstring token>\nimport datetime\nimport json\nimport os\nimport boto3\nfrom aws_lambda_powertools import Logger, Metrics, Tracer\nlogger = Logger()\ntracer = Tracer()\nmetrics = Metrics()\ncloudwatch = boto3.client('cloudwatch')\nmwaa = boto3.client('mwaa')\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table(os.environ['DASHBOARD_TEMPLATE_TABLE'])\nresponse = table.get_item(Key={'id': '1'})\ndashboard_template = response['Item']['data']\n\n\ndef default(o):\n if isinstance(o, (datetime.date, datetime.datetime)):\n return o.isoformat()\n\n\[email protected]_metrics(capture_cold_start_metric=True)\[email protected]_lambda_context(log_event=True)\[email protected]_lambda_handler\ndef lambda_handler(event, context):\n logger.info(json.dumps(event, indent=2, default=default))\n mwaa_environments = mwaa.list_environments()['Environments']\n logger.info(\n f'Airflow environments: {json.dumps(mwaa_environments, indent=2)}')\n response = cloudwatch.list_dashboards(DashboardNamePrefix='Airflow-')\n logger.info(json.dumps(response, indent=2, default=default))\n for env in mwaa_environments:\n dashboard_name = f'Airflow-{env}'\n dashboard_body = dashboard_template.replace('${AWS::Region}', os.\n getenv('AWS_REGION', 'us-east-1')).replace('${EnvironmentName}',\n env)\n logger.info(f'Creating/updating dashboard: {dashboard_name}')\n logger.debug(dashboard_body)\n response = cloudwatch.put_dashboard(DashboardName=dashboard_name,\n DashboardBody=dashboard_body)\n logger.info(json.dumps(response, indent=2))\n",
"<docstring token>\n<import token>\nlogger = Logger()\ntracer = Tracer()\nmetrics = Metrics()\ncloudwatch = boto3.client('cloudwatch')\nmwaa = boto3.client('mwaa')\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table(os.environ['DASHBOARD_TEMPLATE_TABLE'])\nresponse = table.get_item(Key={'id': '1'})\ndashboard_template = response['Item']['data']\n\n\ndef default(o):\n if isinstance(o, (datetime.date, datetime.datetime)):\n return o.isoformat()\n\n\[email protected]_metrics(capture_cold_start_metric=True)\[email protected]_lambda_context(log_event=True)\[email protected]_lambda_handler\ndef lambda_handler(event, context):\n logger.info(json.dumps(event, indent=2, default=default))\n mwaa_environments = mwaa.list_environments()['Environments']\n logger.info(\n f'Airflow environments: {json.dumps(mwaa_environments, indent=2)}')\n response = cloudwatch.list_dashboards(DashboardNamePrefix='Airflow-')\n logger.info(json.dumps(response, indent=2, default=default))\n for env in mwaa_environments:\n dashboard_name = f'Airflow-{env}'\n dashboard_body = dashboard_template.replace('${AWS::Region}', os.\n getenv('AWS_REGION', 'us-east-1')).replace('${EnvironmentName}',\n env)\n logger.info(f'Creating/updating dashboard: {dashboard_name}')\n logger.debug(dashboard_body)\n response = cloudwatch.put_dashboard(DashboardName=dashboard_name,\n DashboardBody=dashboard_body)\n logger.info(json.dumps(response, indent=2))\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef default(o):\n if isinstance(o, (datetime.date, datetime.datetime)):\n return o.isoformat()\n\n\[email protected]_metrics(capture_cold_start_metric=True)\[email protected]_lambda_context(log_event=True)\[email protected]_lambda_handler\ndef lambda_handler(event, context):\n logger.info(json.dumps(event, indent=2, default=default))\n mwaa_environments = mwaa.list_environments()['Environments']\n logger.info(\n f'Airflow environments: {json.dumps(mwaa_environments, indent=2)}')\n response = cloudwatch.list_dashboards(DashboardNamePrefix='Airflow-')\n logger.info(json.dumps(response, indent=2, default=default))\n for env in mwaa_environments:\n dashboard_name = f'Airflow-{env}'\n dashboard_body = dashboard_template.replace('${AWS::Region}', os.\n getenv('AWS_REGION', 'us-east-1')).replace('${EnvironmentName}',\n env)\n logger.info(f'Creating/updating dashboard: {dashboard_name}')\n logger.debug(dashboard_body)\n response = cloudwatch.put_dashboard(DashboardName=dashboard_name,\n DashboardBody=dashboard_body)\n logger.info(json.dumps(response, indent=2))\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef default(o):\n if isinstance(o, (datetime.date, datetime.datetime)):\n return o.isoformat()\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n"
] | false |
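The lambda above fills its dashboard template with chained str.replace calls rather than a templating engine. A minimal standalone sketch of that substitution step; the template body and values below are illustrative, not taken from the source:

# Sketch of the ${...} substitution used by the dashboard lambda above;
# the template text and environment name are made-up examples.
template = '{"region": "${AWS::Region}", "title": "Airflow-${EnvironmentName}"}'

def render(tpl, region, env):
    # Same technique as the lambda: one str.replace per placeholder.
    return tpl.replace("${AWS::Region}", region).replace("${EnvironmentName}", env)

print(render(template, "us-east-1", "prod"))
# {"region": "us-east-1", "title": "Airflow-prod"}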
98,445 |
f1a00a2bd52ca479126f561d533321b163764c38
|
# Generated by Django 2.0.3 on 2020-01-18 18:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teams', '0002_auto_20200118_1313'),
]
operations = [
migrations.AlterField(
model_name='player',
name='position',
field=models.IntegerField(choices=[(0, 'QB'), (1, 'RB'), (2, 'FB'), (3, 'WR'), (4, 'TE'), (5, 'C'), (6, 'LG'), (7, 'LT'), (8, 'RG'), (9, 'RT'), (10, 'LDT'), (11, 'LDE'), (12, 'RDT'), (13, 'RDE'), (14, 'WLB'), (15, 'MLB'), (16, 'SLB'), (17, 'LCB'), (18, 'RCB'), (19, 'FS'), (20, 'SS'), (21, 'PK'), (22, 'P'), (23, 'H'), (24, 'PR'), (25, 'KR'), (26, 'LS')], default=0),
),
]
|
[
"# Generated by Django 2.0.3 on 2020-01-18 18:40\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('teams', '0002_auto_20200118_1313'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='player',\r\n name='position',\r\n field=models.IntegerField(choices=[(0, 'QB'), (1, 'RB'), (2, 'FB'), (3, 'WR'), (4, 'TE'), (5, 'C'), (6, 'LG'), (7, 'LT'), (8, 'RG'), (9, 'RT'), (10, 'LDT'), (11, 'LDE'), (12, 'RDT'), (13, 'RDE'), (14, 'WLB'), (15, 'MLB'), (16, 'SLB'), (17, 'LCB'), (18, 'RCB'), (19, 'FS'), (20, 'SS'), (21, 'PK'), (22, 'P'), (23, 'H'), (24, 'PR'), (25, 'KR'), (26, 'LS')], default=0),\r\n ),\r\n ]\r\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('teams', '0002_auto_20200118_1313')]\n operations = [migrations.AlterField(model_name='player', name=\n 'position', field=models.IntegerField(choices=[(0, 'QB'), (1, 'RB'),\n (2, 'FB'), (3, 'WR'), (4, 'TE'), (5, 'C'), (6, 'LG'), (7, 'LT'), (8,\n 'RG'), (9, 'RT'), (10, 'LDT'), (11, 'LDE'), (12, 'RDT'), (13, 'RDE'\n ), (14, 'WLB'), (15, 'MLB'), (16, 'SLB'), (17, 'LCB'), (18, 'RCB'),\n (19, 'FS'), (20, 'SS'), (21, 'PK'), (22, 'P'), (23, 'H'), (24, 'PR'\n ), (25, 'KR'), (26, 'LS')], default=0))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('teams', '0002_auto_20200118_1313')]\n operations = [migrations.AlterField(model_name='player', name=\n 'position', field=models.IntegerField(choices=[(0, 'QB'), (1, 'RB'),\n (2, 'FB'), (3, 'WR'), (4, 'TE'), (5, 'C'), (6, 'LG'), (7, 'LT'), (8,\n 'RG'), (9, 'RT'), (10, 'LDT'), (11, 'LDE'), (12, 'RDT'), (13, 'RDE'\n ), (14, 'WLB'), (15, 'MLB'), (16, 'SLB'), (17, 'LCB'), (18, 'RCB'),\n (19, 'FS'), (20, 'SS'), (21, 'PK'), (22, 'P'), (23, 'H'), (24, 'PR'\n ), (25, 'KR'), (26, 'LS')], default=0))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
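For readers unfamiliar with Django's choices option, the database stores the integer and the label is derived from the pairs. A plain-Python sketch of that mapping, repeating only the first few pairs from the migration above:

# Plain-Python illustration of the choices mapping; truncated on purpose.
POSITION_CHOICES = [(0, 'QB'), (1, 'RB'), (2, 'FB'), (3, 'WR'), (4, 'TE')]

labels = dict(POSITION_CHOICES)  # int -> display label
print(labels[0])                 # QB, what player.get_position_display() returns in Django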
98,446 |
50c5630b1c2107d0c9e446c82a144cdd1e724dda
|
# bitmask_file = "bitmask_test.txt"
bitmask_file = "bitmask.txt"
def get_file_data(filename: str) -> list:
with open(filename, "r") as file:
return [line.strip().split(" ") for line in file]
def write_to_memory(bit_list: list) -> dict:
mask = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
memory_dict = {}
for element in bit_list:
if element[0] == "mask":
mask = element[2]
else:
memory_location = element[0]
bits = "{:036b}".format(int(element[2]))
masked_bits = []
for i, j in zip(mask, bits):
if i == "X":
masked_bits.append(j)
else:
masked_bits.append(i)
memory_dict[memory_location] = int("".join(masked_bits), 2)
return memory_dict
def main():
# extract data
bitmask_list = get_file_data(bitmask_file)
# run masking
memory = write_to_memory(bitmask_list)
print(memory)
memory_sum = sum(memory.values())
print(memory_sum)
if __name__ == '__main__':
main()
|
[
"\n# bitmask_file = \"bitmask_test.txt\"\nbitmask_file = \"bitmask.txt\"\n\n\ndef get_file_data(filename: str) -> list:\n with open(filename, \"r\") as file:\n return [line.strip().split(\" \") for line in file]\n\n\ndef write_to_memory(bit_list: list) -> dict:\n mask = \"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n memory_dict = {}\n for element in bit_list:\n if element[0] == \"mask\":\n mask = element[2]\n else:\n memory_location = element[0]\n bits = \"{:036b}\".format(int(element[2]))\n masked_bits = []\n for i, j in zip(mask, bits):\n if i == \"X\":\n masked_bits.append(j)\n else:\n masked_bits.append(i)\n memory_dict[memory_location] = int(\"\".join(masked_bits), 2)\n return memory_dict\n\n\ndef main():\n # extract data\n bitmask_list = get_file_data(bitmask_file)\n # run masking\n memory = write_to_memory(bitmask_list)\n print(memory)\n memory_sum = sum(memory.values())\n print(memory_sum)\n\n\nif __name__ == '__main__':\n main()\n\n",
"bitmask_file = 'bitmask.txt'\n\n\ndef get_file_data(filename: str) ->list:\n with open(filename, 'r') as file:\n return [line.strip().split(' ') for line in file]\n\n\ndef write_to_memory(bit_list: list) ->dict:\n mask = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\n memory_dict = {}\n for element in bit_list:\n if element[0] == 'mask':\n mask = element[2]\n else:\n memory_location = element[0]\n bits = '{:036b}'.format(int(element[2]))\n masked_bits = []\n for i, j in zip(mask, bits):\n if i == 'X':\n masked_bits.append(j)\n else:\n masked_bits.append(i)\n memory_dict[memory_location] = int(''.join(masked_bits), 2)\n return memory_dict\n\n\ndef main():\n bitmask_list = get_file_data(bitmask_file)\n memory = write_to_memory(bitmask_list)\n print(memory)\n memory_sum = sum(memory.values())\n print(memory_sum)\n\n\nif __name__ == '__main__':\n main()\n",
"<assignment token>\n\n\ndef get_file_data(filename: str) ->list:\n with open(filename, 'r') as file:\n return [line.strip().split(' ') for line in file]\n\n\ndef write_to_memory(bit_list: list) ->dict:\n mask = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\n memory_dict = {}\n for element in bit_list:\n if element[0] == 'mask':\n mask = element[2]\n else:\n memory_location = element[0]\n bits = '{:036b}'.format(int(element[2]))\n masked_bits = []\n for i, j in zip(mask, bits):\n if i == 'X':\n masked_bits.append(j)\n else:\n masked_bits.append(i)\n memory_dict[memory_location] = int(''.join(masked_bits), 2)\n return memory_dict\n\n\ndef main():\n bitmask_list = get_file_data(bitmask_file)\n memory = write_to_memory(bitmask_list)\n print(memory)\n memory_sum = sum(memory.values())\n print(memory_sum)\n\n\nif __name__ == '__main__':\n main()\n",
"<assignment token>\n\n\ndef get_file_data(filename: str) ->list:\n with open(filename, 'r') as file:\n return [line.strip().split(' ') for line in file]\n\n\ndef write_to_memory(bit_list: list) ->dict:\n mask = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\n memory_dict = {}\n for element in bit_list:\n if element[0] == 'mask':\n mask = element[2]\n else:\n memory_location = element[0]\n bits = '{:036b}'.format(int(element[2]))\n masked_bits = []\n for i, j in zip(mask, bits):\n if i == 'X':\n masked_bits.append(j)\n else:\n masked_bits.append(i)\n memory_dict[memory_location] = int(''.join(masked_bits), 2)\n return memory_dict\n\n\ndef main():\n bitmask_list = get_file_data(bitmask_file)\n memory = write_to_memory(bitmask_list)\n print(memory)\n memory_sum = sum(memory.values())\n print(memory_sum)\n\n\n<code token>\n",
"<assignment token>\n\n\ndef get_file_data(filename: str) ->list:\n with open(filename, 'r') as file:\n return [line.strip().split(' ') for line in file]\n\n\n<function token>\n\n\ndef main():\n bitmask_list = get_file_data(bitmask_file)\n memory = write_to_memory(bitmask_list)\n print(memory)\n memory_sum = sum(memory.values())\n print(memory_sum)\n\n\n<code token>\n",
"<assignment token>\n<function token>\n<function token>\n\n\ndef main():\n bitmask_list = get_file_data(bitmask_file)\n memory = write_to_memory(bitmask_list)\n print(memory)\n memory_sum = sum(memory.values())\n print(memory_sum)\n\n\n<code token>\n",
"<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
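A worked, scaled-down example of the masking rule in write_to_memory above: an 'X' in the mask keeps the value's bit, any other mask character overrides it.

# 5-bit demonstration of the mask logic (the real puzzle uses 36 bits).
mask = 'X1XX0'
bits = '{:05b}'.format(11)  # '01011'
masked = ''.join(v if m == 'X' else m for m, v in zip(mask, bits))
print(masked, int(masked, 2))  # 01010 10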
98,447 |
25001c9fe6c088b58f81c9d7eb32f887cd1ca41a
|
@api.onchange('attribute_id')
def attribute_id_change(self):
if self.attribute_id:
self.attribute = self.attribute_id.attribute
group_id = fields.Many2one('radius.group', string='Group')
    attribute_id = fields.Many2one('radius.attribute', string='Attribute Search')
|
[
" @api.onchange('attribute_id')\n def attribute_id_change(self):\n if self.attribute_id:\n self.attribute = self.attribute_id.attribute\n \n group_id = fields.Many2one('radius.group', string='Group')\n attribute_id = fields.Many2one('radius.attribute', string='Atribute Search')"
] | true |
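The entry above is a class-body fragment, which is presumably why it carries the error flag. A hedged sketch of the Odoo model it appears to belong to; the import, class name, _name value, and the attribute field are assumptions, not part of the source:

# Hypothetical reconstruction; everything outside the two field definitions
# and the onchange handler is an assumption.
from odoo import api, fields, models


class RadiusCheck(models.Model):
    _name = 'radius.check'  # assumed model name

    attribute = fields.Char(string='Attribute')  # assumed target field
    group_id = fields.Many2one('radius.group', string='Group')
    attribute_id = fields.Many2one('radius.attribute', string='Attribute Search')

    @api.onchange('attribute_id')
    def attribute_id_change(self):
        # Copy the attribute name from the selected search record.
        if self.attribute_id:
            self.attribute = self.attribute_id.attribute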
98,448 |
be985ba4cf4a65d113e4d3058c4c426debb965f6
|
#!/usr/bin/env python
"""VideoMega.tv Service Code"""
from unpacker import unpack as Unpack
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
RE_NORM = Regex(r'(https?\:\/\/(?:www\.)?videomega\.\w+\/)(?:(?:view|iframe|cdn|validatehash)\.php)?\?(?:ref|hashkey)\=(\w+)')
RE_TITLE = Regex(r'(?i)(?:^videomega\.\w+\s*?\-\s*?|\s*?\-\s*?videomega\.\w+$)')
RE_PACKED = Regex(r"(eval\(function\(p,a,c,k,e,.+\|src\|.+\.split\('\|'\).*?\)\))")
RE_SRC = Regex(r'"src"\s*?,\s*?"([^"]+)"')
FALLBACK = 'http://i.imgur.com/75YO83o.jpg'
STITLE = 'VideoMega'
####################################################################################################
def NormalizeURL(url):
r = RE_NORM.search(url)
return r.group(1) + 'view.php?ref=' + r.group(2) if r else url
####################################################################################################
def MetadataObjectForURL(url):
try:
html = HTML.ElementFromURL(url)
except:
Log.Exception(u"* Error: Cannot open '{0}' >>>".format(url))
raise Ex.MediaNotAvailable
title = html.xpath('//title/text()')
if not title:
raise Ex.MediaExpired
title = RE_TITLE.sub(r'', title[0]).strip()
thumb = html.xpath('//video/@poster')
thumb = thumb[0] if thumb else FALLBACK
return VideoClipObject(
title=title if title else get_fallback_title(url),
thumb=Resource.ContentsOfURLWithFallback([thumb, FALLBACK]),
source_title=STITLE
)
####################################################################################################
def MediaObjectsForURL(url):
return [
MediaObject(
video_codec=VideoCodec.H264,
audio_codec=AudioCodec.AAC,
audio_channels=2,
optimized_for_streaming=True,
parts=[
PartObject(key=Callback(PlayVideo, url=url))
]
)
]
####################################################################################################
@indirect
def PlayVideo(url, **kwargs):
http_headers = {'User-Agent': USER_AGENT, 'Referer': url, 'Cookie': 'noadvtday=0'}
url = url.replace('/view.php', '/cdn.php')
try:
page = HTTP.Request(url, headers=http_headers, cacheTime=10).content
except:
Log.Exception(u"* Error: Cannot open '{0}' >>>".format(url))
raise Ex.MediaNotAvailable
packed = RE_PACKED.search(page)
if packed:
data = Unpack(packed.group(1))
vurl = RE_SRC.search(data)
if vurl:
Log.Debug(u"* PlayVideo URL = {0}".format(vurl.group(1)))
return IndirectResponse(VideoClipObject, key=vurl.group(1))
raise Ex.MediaNotAvailable
####################################################################################################
def get_fallback_title(url):
return u"{0} | {1}".format(STITLE, url.rsplit('?ref=', 1)[1])
|
[
"#!/usr/bin/env python\n\n\"\"\"VideoMega.tv Service Code\"\"\"\n\nfrom unpacker import unpack as Unpack\n\nUSER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'\nRE_NORM = Regex(r'(https?\\:\\/\\/(?:www\\.)?videomega\\.\\w+\\/)(?:(?:view|iframe|cdn|validatehash)\\.php)?\\?(?:ref|hashkey)\\=(\\w+)')\nRE_TITLE = Regex(r'(?i)(?:^videomega\\.\\w+\\s*?\\-\\s*?|\\s*?\\-\\s*?videomega\\.\\w+$)')\nRE_PACKED = Regex(r\"(eval\\(function\\(p,a,c,k,e,.+\\|src\\|.+\\.split\\('\\|'\\).*?\\)\\))\")\nRE_SRC = Regex(r'\"src\"\\s*?,\\s*?\"([^\"]+)\"')\nFALLBACK = 'http://i.imgur.com/75YO83o.jpg'\nSTITLE = 'VideoMega'\n\n####################################################################################################\ndef NormalizeURL(url):\n\n r = RE_NORM.search(url)\n return r.group(1) + 'view.php?ref=' + r.group(2) if r else url\n\n####################################################################################################\ndef MetadataObjectForURL(url):\n\n try:\n html = HTML.ElementFromURL(url)\n except:\n Log.Exception(u\"* Error: Cannot open '{0}' >>>\".format(url))\n raise Ex.MediaNotAvailable\n\n title = html.xpath('//title/text()')\n if not title:\n raise Ex.MediaExpired\n title = RE_TITLE.sub(r'', title[0]).strip()\n\n thumb = html.xpath('//video/@poster')\n thumb = thumb[0] if thumb else FALLBACK\n\n return VideoClipObject(\n title=title if title else get_fallback_title(url),\n thumb=Resource.ContentsOfURLWithFallback([thumb, FALLBACK]),\n source_title=STITLE\n )\n\n####################################################################################################\ndef MediaObjectsForURL(url):\n\n return [\n MediaObject(\n video_codec=VideoCodec.H264,\n audio_codec=AudioCodec.AAC,\n audio_channels=2,\n optimized_for_streaming=True,\n parts=[\n PartObject(key=Callback(PlayVideo, url=url))\n ]\n )\n ]\n\n####################################################################################################\n@indirect\ndef PlayVideo(url, **kwargs):\n\n http_headers = {'User-Agent': USER_AGENT, 'Referer': url, 'Cookie': 'noadvtday=0'}\n url = url.replace('/view.php', '/cdn.php')\n\n try:\n page = HTTP.Request(url, headers=http_headers, cacheTime=10).content\n except:\n Log.Exception(u\"* Error: Cannot open '{0}' >>>\".format(url))\n raise Ex.MediaNotAvailable\n\n packed = RE_PACKED.search(page)\n if packed:\n data = Unpack(packed.group(1))\n vurl = RE_SRC.search(data)\n if vurl:\n Log.Debug(u\"* PlayVideo URL = {0}\".format(vurl.group(1)))\n return IndirectResponse(VideoClipObject, key=vurl.group(1))\n\n raise Ex.MediaNotAvailable\n\n####################################################################################################\ndef get_fallback_title(url):\n return u\"{0} | {1}\".format(STITLE, url.rsplit('?ref=', 1)[1])\n",
"<docstring token>\nfrom unpacker import unpack as Unpack\nUSER_AGENT = (\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'\n )\nRE_NORM = Regex(\n '(https?\\\\:\\\\/\\\\/(?:www\\\\.)?videomega\\\\.\\\\w+\\\\/)(?:(?:view|iframe|cdn|validatehash)\\\\.php)?\\\\?(?:ref|hashkey)\\\\=(\\\\w+)'\n )\nRE_TITLE = Regex(\n '(?i)(?:^videomega\\\\.\\\\w+\\\\s*?\\\\-\\\\s*?|\\\\s*?\\\\-\\\\s*?videomega\\\\.\\\\w+$)')\nRE_PACKED = Regex(\n \"(eval\\\\(function\\\\(p,a,c,k,e,.+\\\\|src\\\\|.+\\\\.split\\\\('\\\\|'\\\\).*?\\\\)\\\\))\")\nRE_SRC = Regex('\"src\"\\\\s*?,\\\\s*?\"([^\"]+)\"')\nFALLBACK = 'http://i.imgur.com/75YO83o.jpg'\nSTITLE = 'VideoMega'\n\n\ndef NormalizeURL(url):\n r = RE_NORM.search(url)\n return r.group(1) + 'view.php?ref=' + r.group(2) if r else url\n\n\ndef MetadataObjectForURL(url):\n try:\n html = HTML.ElementFromURL(url)\n except:\n Log.Exception(u\"* Error: Cannot open '{0}' >>>\".format(url))\n raise Ex.MediaNotAvailable\n title = html.xpath('//title/text()')\n if not title:\n raise Ex.MediaExpired\n title = RE_TITLE.sub('', title[0]).strip()\n thumb = html.xpath('//video/@poster')\n thumb = thumb[0] if thumb else FALLBACK\n return VideoClipObject(title=title if title else get_fallback_title(url\n ), thumb=Resource.ContentsOfURLWithFallback([thumb, FALLBACK]),\n source_title=STITLE)\n\n\ndef MediaObjectsForURL(url):\n return [MediaObject(video_codec=VideoCodec.H264, audio_codec=AudioCodec\n .AAC, audio_channels=2, optimized_for_streaming=True, parts=[\n PartObject(key=Callback(PlayVideo, url=url))])]\n\n\n@indirect\ndef PlayVideo(url, **kwargs):\n http_headers = {'User-Agent': USER_AGENT, 'Referer': url, 'Cookie':\n 'noadvtday=0'}\n url = url.replace('/view.php', '/cdn.php')\n try:\n page = HTTP.Request(url, headers=http_headers, cacheTime=10).content\n except:\n Log.Exception(u\"* Error: Cannot open '{0}' >>>\".format(url))\n raise Ex.MediaNotAvailable\n packed = RE_PACKED.search(page)\n if packed:\n data = Unpack(packed.group(1))\n vurl = RE_SRC.search(data)\n if vurl:\n Log.Debug(u'* PlayVideo URL = {0}'.format(vurl.group(1)))\n return IndirectResponse(VideoClipObject, key=vurl.group(1))\n raise Ex.MediaNotAvailable\n\n\ndef get_fallback_title(url):\n return u'{0} | {1}'.format(STITLE, url.rsplit('?ref=', 1)[1])\n",
"<docstring token>\n<import token>\nUSER_AGENT = (\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'\n )\nRE_NORM = Regex(\n '(https?\\\\:\\\\/\\\\/(?:www\\\\.)?videomega\\\\.\\\\w+\\\\/)(?:(?:view|iframe|cdn|validatehash)\\\\.php)?\\\\?(?:ref|hashkey)\\\\=(\\\\w+)'\n )\nRE_TITLE = Regex(\n '(?i)(?:^videomega\\\\.\\\\w+\\\\s*?\\\\-\\\\s*?|\\\\s*?\\\\-\\\\s*?videomega\\\\.\\\\w+$)')\nRE_PACKED = Regex(\n \"(eval\\\\(function\\\\(p,a,c,k,e,.+\\\\|src\\\\|.+\\\\.split\\\\('\\\\|'\\\\).*?\\\\)\\\\))\")\nRE_SRC = Regex('\"src\"\\\\s*?,\\\\s*?\"([^\"]+)\"')\nFALLBACK = 'http://i.imgur.com/75YO83o.jpg'\nSTITLE = 'VideoMega'\n\n\ndef NormalizeURL(url):\n r = RE_NORM.search(url)\n return r.group(1) + 'view.php?ref=' + r.group(2) if r else url\n\n\ndef MetadataObjectForURL(url):\n try:\n html = HTML.ElementFromURL(url)\n except:\n Log.Exception(u\"* Error: Cannot open '{0}' >>>\".format(url))\n raise Ex.MediaNotAvailable\n title = html.xpath('//title/text()')\n if not title:\n raise Ex.MediaExpired\n title = RE_TITLE.sub('', title[0]).strip()\n thumb = html.xpath('//video/@poster')\n thumb = thumb[0] if thumb else FALLBACK\n return VideoClipObject(title=title if title else get_fallback_title(url\n ), thumb=Resource.ContentsOfURLWithFallback([thumb, FALLBACK]),\n source_title=STITLE)\n\n\ndef MediaObjectsForURL(url):\n return [MediaObject(video_codec=VideoCodec.H264, audio_codec=AudioCodec\n .AAC, audio_channels=2, optimized_for_streaming=True, parts=[\n PartObject(key=Callback(PlayVideo, url=url))])]\n\n\n@indirect\ndef PlayVideo(url, **kwargs):\n http_headers = {'User-Agent': USER_AGENT, 'Referer': url, 'Cookie':\n 'noadvtday=0'}\n url = url.replace('/view.php', '/cdn.php')\n try:\n page = HTTP.Request(url, headers=http_headers, cacheTime=10).content\n except:\n Log.Exception(u\"* Error: Cannot open '{0}' >>>\".format(url))\n raise Ex.MediaNotAvailable\n packed = RE_PACKED.search(page)\n if packed:\n data = Unpack(packed.group(1))\n vurl = RE_SRC.search(data)\n if vurl:\n Log.Debug(u'* PlayVideo URL = {0}'.format(vurl.group(1)))\n return IndirectResponse(VideoClipObject, key=vurl.group(1))\n raise Ex.MediaNotAvailable\n\n\ndef get_fallback_title(url):\n return u'{0} | {1}'.format(STITLE, url.rsplit('?ref=', 1)[1])\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef NormalizeURL(url):\n r = RE_NORM.search(url)\n return r.group(1) + 'view.php?ref=' + r.group(2) if r else url\n\n\ndef MetadataObjectForURL(url):\n try:\n html = HTML.ElementFromURL(url)\n except:\n Log.Exception(u\"* Error: Cannot open '{0}' >>>\".format(url))\n raise Ex.MediaNotAvailable\n title = html.xpath('//title/text()')\n if not title:\n raise Ex.MediaExpired\n title = RE_TITLE.sub('', title[0]).strip()\n thumb = html.xpath('//video/@poster')\n thumb = thumb[0] if thumb else FALLBACK\n return VideoClipObject(title=title if title else get_fallback_title(url\n ), thumb=Resource.ContentsOfURLWithFallback([thumb, FALLBACK]),\n source_title=STITLE)\n\n\ndef MediaObjectsForURL(url):\n return [MediaObject(video_codec=VideoCodec.H264, audio_codec=AudioCodec\n .AAC, audio_channels=2, optimized_for_streaming=True, parts=[\n PartObject(key=Callback(PlayVideo, url=url))])]\n\n\n@indirect\ndef PlayVideo(url, **kwargs):\n http_headers = {'User-Agent': USER_AGENT, 'Referer': url, 'Cookie':\n 'noadvtday=0'}\n url = url.replace('/view.php', '/cdn.php')\n try:\n page = HTTP.Request(url, headers=http_headers, cacheTime=10).content\n except:\n Log.Exception(u\"* Error: Cannot open '{0}' >>>\".format(url))\n raise Ex.MediaNotAvailable\n packed = RE_PACKED.search(page)\n if packed:\n data = Unpack(packed.group(1))\n vurl = RE_SRC.search(data)\n if vurl:\n Log.Debug(u'* PlayVideo URL = {0}'.format(vurl.group(1)))\n return IndirectResponse(VideoClipObject, key=vurl.group(1))\n raise Ex.MediaNotAvailable\n\n\ndef get_fallback_title(url):\n return u'{0} | {1}'.format(STITLE, url.rsplit('?ref=', 1)[1])\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef NormalizeURL(url):\n r = RE_NORM.search(url)\n return r.group(1) + 'view.php?ref=' + r.group(2) if r else url\n\n\ndef MetadataObjectForURL(url):\n try:\n html = HTML.ElementFromURL(url)\n except:\n Log.Exception(u\"* Error: Cannot open '{0}' >>>\".format(url))\n raise Ex.MediaNotAvailable\n title = html.xpath('//title/text()')\n if not title:\n raise Ex.MediaExpired\n title = RE_TITLE.sub('', title[0]).strip()\n thumb = html.xpath('//video/@poster')\n thumb = thumb[0] if thumb else FALLBACK\n return VideoClipObject(title=title if title else get_fallback_title(url\n ), thumb=Resource.ContentsOfURLWithFallback([thumb, FALLBACK]),\n source_title=STITLE)\n\n\ndef MediaObjectsForURL(url):\n return [MediaObject(video_codec=VideoCodec.H264, audio_codec=AudioCodec\n .AAC, audio_channels=2, optimized_for_streaming=True, parts=[\n PartObject(key=Callback(PlayVideo, url=url))])]\n\n\n<function token>\n\n\ndef get_fallback_title(url):\n return u'{0} | {1}'.format(STITLE, url.rsplit('?ref=', 1)[1])\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef MetadataObjectForURL(url):\n try:\n html = HTML.ElementFromURL(url)\n except:\n Log.Exception(u\"* Error: Cannot open '{0}' >>>\".format(url))\n raise Ex.MediaNotAvailable\n title = html.xpath('//title/text()')\n if not title:\n raise Ex.MediaExpired\n title = RE_TITLE.sub('', title[0]).strip()\n thumb = html.xpath('//video/@poster')\n thumb = thumb[0] if thumb else FALLBACK\n return VideoClipObject(title=title if title else get_fallback_title(url\n ), thumb=Resource.ContentsOfURLWithFallback([thumb, FALLBACK]),\n source_title=STITLE)\n\n\ndef MediaObjectsForURL(url):\n return [MediaObject(video_codec=VideoCodec.H264, audio_codec=AudioCodec\n .AAC, audio_channels=2, optimized_for_streaming=True, parts=[\n PartObject(key=Callback(PlayVideo, url=url))])]\n\n\n<function token>\n\n\ndef get_fallback_title(url):\n return u'{0} | {1}'.format(STITLE, url.rsplit('?ref=', 1)[1])\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef MediaObjectsForURL(url):\n return [MediaObject(video_codec=VideoCodec.H264, audio_codec=AudioCodec\n .AAC, audio_channels=2, optimized_for_streaming=True, parts=[\n PartObject(key=Callback(PlayVideo, url=url))])]\n\n\n<function token>\n\n\ndef get_fallback_title(url):\n return u'{0} | {1}'.format(STITLE, url.rsplit('?ref=', 1)[1])\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_fallback_title(url):\n return u'{0} | {1}'.format(STITLE, url.rsplit('?ref=', 1)[1])\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
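The URL normalisation above can be checked outside the Plex sandbox with the stdlib re module (the plugin's Regex() is a Plex framework helper). The sample URL below is made up:

# Standalone re-check of NormalizeURL's rewrite.
import re

RE_NORM = re.compile(
    r'(https?\:\/\/(?:www\.)?videomega\.\w+\/)'
    r'(?:(?:view|iframe|cdn|validatehash)\.php)?\?(?:ref|hashkey)\=(\w+)')

url = 'http://videomega.tv/iframe.php?ref=abc123'  # illustrative sample
r = RE_NORM.search(url)
print(r.group(1) + 'view.php?ref=' + r.group(2))
# http://videomega.tv/view.php?ref=abc123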
98,449 |
9b0f2d82f0cbe12da11180e26594109718a9bb9a
|
# Authenticating APIs with Python automation - auth method: access token in the request header (example)
import requests
from config.configurations import *
from config.resources import *
gitAccessUrl = get_config()['api']['gitHubUrl']
print(gitAccessUrl)
# userName = get_config()['gitHubCredentials']['userName']
# no password prompt needed: basic auth has been deprecated on GitHub since 5 May 2021
# session manager
with requests.Session() as sessionManager:
HeadAccept = ApiResources.HeadAccept
sessionManager.headers.update(HeadAccept)
token = input('Please enter GitHub access token:\n')
resourceObj = ApiResources(token)
head_authorize = resourceObj.get_auth_token()
sessionManager.headers.update(head_authorize)
    # requesting GitHub with the authorization header
responseGitAuth = sessionManager.get(gitAccessUrl+'/user')
print(responseGitAuth.status_code)
print(responseGitAuth.json())
# accessing octokit org's repo
    pathOrgRepos = ApiResources.gitHubRepo  # resource path for the org's repos, from resources.py
repoUrl = gitAccessUrl+pathOrgRepos # making final url by adding base url and resource path from resources.py
print(repoUrl)
responseUserRepos = sessionManager.get(repoUrl)
print(responseUserRepos.status_code)
# print(responseUserRepos.json())
|
[
"# Authenticating API's using Python Automation auth method (access token in header)- Example\nimport requests\nfrom config.configurations import *\nfrom config.resources import *\n\ngitAccessUrl = get_config()['api']['gitHubUrl']\nprint(gitAccessUrl)\n# userName = get_config()['gitHubCredentials']['userName']\n# get password by user, not required as basic auth deprecated on github since 5may 2021\n# session manager\nwith requests.Session() as sessionManager:\n HeadAccept = ApiResources.HeadAccept\n sessionManager.headers.update(HeadAccept)\n token = input('Please enter GitHub access token:\\n')\n resourceObj = ApiResources(token)\n head_authorize = resourceObj.get_auth_token()\n sessionManager.headers.update(head_authorize)\n # requesting git hub with authorization header\n responseGitAuth = sessionManager.get(gitAccessUrl+'/user')\n print(responseGitAuth.status_code)\n print(responseGitAuth.json())\n\n # accessing octokit org's repo\n pathOrgRepos = ApiResources.gitHubRepo # preparing base url using configurations.py\n repoUrl = gitAccessUrl+pathOrgRepos # making final url by adding base url and resource path from resources.py\n print(repoUrl)\n responseUserRepos = sessionManager.get(repoUrl)\n print(responseUserRepos.status_code)\n # print(responseUserRepos.json())\n",
"import requests\nfrom config.configurations import *\nfrom config.resources import *\ngitAccessUrl = get_config()['api']['gitHubUrl']\nprint(gitAccessUrl)\nwith requests.Session() as sessionManager:\n HeadAccept = ApiResources.HeadAccept\n sessionManager.headers.update(HeadAccept)\n token = input('Please enter GitHub access token:\\n')\n resourceObj = ApiResources(token)\n head_authorize = resourceObj.get_auth_token()\n sessionManager.headers.update(head_authorize)\n responseGitAuth = sessionManager.get(gitAccessUrl + '/user')\n print(responseGitAuth.status_code)\n print(responseGitAuth.json())\n pathOrgRepos = ApiResources.gitHubRepo\n repoUrl = gitAccessUrl + pathOrgRepos\n print(repoUrl)\n responseUserRepos = sessionManager.get(repoUrl)\n print(responseUserRepos.status_code)\n",
"<import token>\ngitAccessUrl = get_config()['api']['gitHubUrl']\nprint(gitAccessUrl)\nwith requests.Session() as sessionManager:\n HeadAccept = ApiResources.HeadAccept\n sessionManager.headers.update(HeadAccept)\n token = input('Please enter GitHub access token:\\n')\n resourceObj = ApiResources(token)\n head_authorize = resourceObj.get_auth_token()\n sessionManager.headers.update(head_authorize)\n responseGitAuth = sessionManager.get(gitAccessUrl + '/user')\n print(responseGitAuth.status_code)\n print(responseGitAuth.json())\n pathOrgRepos = ApiResources.gitHubRepo\n repoUrl = gitAccessUrl + pathOrgRepos\n print(repoUrl)\n responseUserRepos = sessionManager.get(repoUrl)\n print(responseUserRepos.status_code)\n",
"<import token>\n<assignment token>\nprint(gitAccessUrl)\nwith requests.Session() as sessionManager:\n HeadAccept = ApiResources.HeadAccept\n sessionManager.headers.update(HeadAccept)\n token = input('Please enter GitHub access token:\\n')\n resourceObj = ApiResources(token)\n head_authorize = resourceObj.get_auth_token()\n sessionManager.headers.update(head_authorize)\n responseGitAuth = sessionManager.get(gitAccessUrl + '/user')\n print(responseGitAuth.status_code)\n print(responseGitAuth.json())\n pathOrgRepos = ApiResources.gitHubRepo\n repoUrl = gitAccessUrl + pathOrgRepos\n print(repoUrl)\n responseUserRepos = sessionManager.get(repoUrl)\n print(responseUserRepos.status_code)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
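Stripped of the config/resources plumbing, the token-in-header pattern above reduces to a few lines; the token value is a placeholder and the 'token <PAT>' header shape follows GitHub's documented scheme:

# Self-contained version of the header-based auth above; the token is a dummy.
import requests

token = 'ghp_xxx'  # placeholder personal access token
resp = requests.get(
    'https://api.github.com/user',
    headers={'Accept': 'application/vnd.github.v3+json',
             'Authorization': 'token ' + token})
print(resp.status_code)  # 200 with a valid token, 401 otherwise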
98,450 |
2f5127c7dd98e19d269a7e0f05362e1cb1a7ba6b
|
#
# @Author: German Cano Quiveu, [email protected]
# @Date: 2019-09-21 22:23:48
# @Last Modified by: German Cano Quiveu, [email protected]
# @Last Modified time: 2019-09-21 22:23:48
#
import cocotb
import numpy as np
import time
import linecache
from cocotb.triggers import Timer,RisingEdge, FallingEdge
from cocotb.regression import TestFactory
from cocotb.result import TestFailure, ReturnValue
from cocotb.clock import Clock
import sys
CLK_PERIOD = 20 # 50 MHz
LOOKUP_FILE = "../../rtl/systemVerilog/lookuptable_contents.mem"
# the keyword yield
# Testbenches built using cocotb use coroutines.
# While the coroutine is executing, the simulation is paused.
# The coroutine uses the yield keyword
# to pass control of execution back to
# the simulator so simulation time can advance again.
#
# yield returns when the 'Trigger' is resolved
#
# Coroutines may also yield a list of triggers
# to indicate that execution should resume if any of them fires
def setup_function(dut, din):
cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())
dut.addr = din
@cocotb.coroutine
def rst_function_test(dut):
dut.rst = 1
yield n_cycles_clock(dut,10)
if(dut.data != 0xFFFFFFFF):
raise TestFailure("Error rst,wrong data value = %s"
% hex(int(dut.data.value)))
dut.rst = 0
@cocotb.coroutine
def check_lookuptable(dut, din):
with open(LOOKUP_FILE) as fp:
lines = fp.readlines()
yield n_cycles_clock(dut,1)
selected_line = lines[din].replace(" ","")
print(selected_line)
if(dut.data.value.integer != int(selected_line,16)):
raise TestFailure("Error data,wrong value = %s"
% hex(int(dut.data.value)))
@cocotb.coroutine
def n_cycles_clock(dut,n):
for i in range(0,n):
yield RisingEdge(dut.clk)
yield FallingEdge(dut.clk)
@cocotb.coroutine
def run_test(dut, din = 2):
setup_function(dut, din)
yield rst_function_test(dut)
yield check_lookuptable(dut,din)
n = 10
factory = TestFactory(run_test)
factory.add_option("din", np.random.randint(low=0,high=32,size=n)) #array de 10 int aleatorios entre 0 y 31
factory.generate_tests()
|
[
" #\n # @Author: German Cano Quiveu, [email protected] \n # @Date: 2019-09-21 22:23:48 \n # @Last Modified by: German Cano Quiveu, [email protected] \n # @Last Modified time: 2019-09-21 22:23:48 \n #\n\nimport cocotb\nimport numpy as np\nimport time\nimport linecache\nfrom cocotb.triggers import Timer,RisingEdge, FallingEdge\nfrom cocotb.regression import TestFactory\nfrom cocotb.result import TestFailure, ReturnValue\nfrom cocotb.clock import Clock\n\nimport sys\n\nCLK_PERIOD = 20 # 50 MHz\n\nLOOKUP_FILE = \"../../rtl/systemVerilog/lookuptable_contents.mem\"\n\n#the keyword yield\n# Testbenches built using Cocotb use coroutines.\n# While the coroutine is executing the simulation is paused.\n# The coroutine uses the yield keyword\n# to pass control of execution back to\n# the simulator and simulation time can advance again.\n#\n# yield return when the 'Trigger' is resolve\n#\n# Coroutines may also yield a list of triggers\n# to indicate that execution should resume if any of them fires\n\n\ndef setup_function(dut, din):\n cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())\n dut.addr = din\n \n\[email protected]\ndef rst_function_test(dut):\n dut.rst = 1\n yield n_cycles_clock(dut,10)\n\n if(dut.data != 0xFFFFFFFF):\n raise TestFailure(\"Error rst,wrong data value = %s\"\n % hex(int(dut.data.value)))\n\n \n\n dut.rst = 0\n\n\[email protected]\ndef check_lookuptable(dut, din):\n \n with open(LOOKUP_FILE) as fp:\n lines = fp.readlines()\n \n \n yield n_cycles_clock(dut,1)\n selected_line = lines[din].replace(\" \",\"\")\n print(selected_line)\n \n if(dut.data.value.integer != int(selected_line,16)):\n raise TestFailure(\"Error data,wrong value = %s\"\n % hex(int(dut.data.value)))\n \n \n\n\n\n\n\[email protected]\ndef n_cycles_clock(dut,n):\n for i in range(0,n):\n yield RisingEdge(dut.clk)\n yield FallingEdge(dut.clk)\n\n\n\[email protected]\ndef run_test(dut, din = 2):\n setup_function(dut, din)\n yield rst_function_test(dut)\n yield check_lookuptable(dut,din)\n\n\n\nn = 10\nfactory = TestFactory(run_test)\nfactory.add_option(\"din\", np.random.randint(low=0,high=32,size=n)) #array de 10 int aleatorios entre 0 y 31\nfactory.generate_tests()",
"import cocotb\nimport numpy as np\nimport time\nimport linecache\nfrom cocotb.triggers import Timer, RisingEdge, FallingEdge\nfrom cocotb.regression import TestFactory\nfrom cocotb.result import TestFailure, ReturnValue\nfrom cocotb.clock import Clock\nimport sys\nCLK_PERIOD = 20\nLOOKUP_FILE = '../../rtl/systemVerilog/lookuptable_contents.mem'\n\n\ndef setup_function(dut, din):\n cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())\n dut.addr = din\n\n\[email protected]\ndef rst_function_test(dut):\n dut.rst = 1\n yield n_cycles_clock(dut, 10)\n if dut.data != 4294967295:\n raise TestFailure('Error rst,wrong data value = %s' % hex(int(dut.\n data.value)))\n dut.rst = 0\n\n\[email protected]\ndef check_lookuptable(dut, din):\n with open(LOOKUP_FILE) as fp:\n lines = fp.readlines()\n yield n_cycles_clock(dut, 1)\n selected_line = lines[din].replace(' ', '')\n print(selected_line)\n if dut.data.value.integer != int(selected_line, 16):\n raise TestFailure('Error data,wrong value = %s' % hex(int(dut.\n data.value)))\n\n\[email protected]\ndef n_cycles_clock(dut, n):\n for i in range(0, n):\n yield RisingEdge(dut.clk)\n yield FallingEdge(dut.clk)\n\n\[email protected]\ndef run_test(dut, din=2):\n setup_function(dut, din)\n yield rst_function_test(dut)\n yield check_lookuptable(dut, din)\n\n\nn = 10\nfactory = TestFactory(run_test)\nfactory.add_option('din', np.random.randint(low=0, high=32, size=n))\nfactory.generate_tests()\n",
"<import token>\nCLK_PERIOD = 20\nLOOKUP_FILE = '../../rtl/systemVerilog/lookuptable_contents.mem'\n\n\ndef setup_function(dut, din):\n cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())\n dut.addr = din\n\n\[email protected]\ndef rst_function_test(dut):\n dut.rst = 1\n yield n_cycles_clock(dut, 10)\n if dut.data != 4294967295:\n raise TestFailure('Error rst,wrong data value = %s' % hex(int(dut.\n data.value)))\n dut.rst = 0\n\n\[email protected]\ndef check_lookuptable(dut, din):\n with open(LOOKUP_FILE) as fp:\n lines = fp.readlines()\n yield n_cycles_clock(dut, 1)\n selected_line = lines[din].replace(' ', '')\n print(selected_line)\n if dut.data.value.integer != int(selected_line, 16):\n raise TestFailure('Error data,wrong value = %s' % hex(int(dut.\n data.value)))\n\n\[email protected]\ndef n_cycles_clock(dut, n):\n for i in range(0, n):\n yield RisingEdge(dut.clk)\n yield FallingEdge(dut.clk)\n\n\[email protected]\ndef run_test(dut, din=2):\n setup_function(dut, din)\n yield rst_function_test(dut)\n yield check_lookuptable(dut, din)\n\n\nn = 10\nfactory = TestFactory(run_test)\nfactory.add_option('din', np.random.randint(low=0, high=32, size=n))\nfactory.generate_tests()\n",
"<import token>\n<assignment token>\n\n\ndef setup_function(dut, din):\n cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())\n dut.addr = din\n\n\[email protected]\ndef rst_function_test(dut):\n dut.rst = 1\n yield n_cycles_clock(dut, 10)\n if dut.data != 4294967295:\n raise TestFailure('Error rst,wrong data value = %s' % hex(int(dut.\n data.value)))\n dut.rst = 0\n\n\[email protected]\ndef check_lookuptable(dut, din):\n with open(LOOKUP_FILE) as fp:\n lines = fp.readlines()\n yield n_cycles_clock(dut, 1)\n selected_line = lines[din].replace(' ', '')\n print(selected_line)\n if dut.data.value.integer != int(selected_line, 16):\n raise TestFailure('Error data,wrong value = %s' % hex(int(dut.\n data.value)))\n\n\[email protected]\ndef n_cycles_clock(dut, n):\n for i in range(0, n):\n yield RisingEdge(dut.clk)\n yield FallingEdge(dut.clk)\n\n\[email protected]\ndef run_test(dut, din=2):\n setup_function(dut, din)\n yield rst_function_test(dut)\n yield check_lookuptable(dut, din)\n\n\n<assignment token>\nfactory.add_option('din', np.random.randint(low=0, high=32, size=n))\nfactory.generate_tests()\n",
"<import token>\n<assignment token>\n\n\ndef setup_function(dut, din):\n cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())\n dut.addr = din\n\n\[email protected]\ndef rst_function_test(dut):\n dut.rst = 1\n yield n_cycles_clock(dut, 10)\n if dut.data != 4294967295:\n raise TestFailure('Error rst,wrong data value = %s' % hex(int(dut.\n data.value)))\n dut.rst = 0\n\n\[email protected]\ndef check_lookuptable(dut, din):\n with open(LOOKUP_FILE) as fp:\n lines = fp.readlines()\n yield n_cycles_clock(dut, 1)\n selected_line = lines[din].replace(' ', '')\n print(selected_line)\n if dut.data.value.integer != int(selected_line, 16):\n raise TestFailure('Error data,wrong value = %s' % hex(int(dut.\n data.value)))\n\n\[email protected]\ndef n_cycles_clock(dut, n):\n for i in range(0, n):\n yield RisingEdge(dut.clk)\n yield FallingEdge(dut.clk)\n\n\[email protected]\ndef run_test(dut, din=2):\n setup_function(dut, din)\n yield rst_function_test(dut)\n yield check_lookuptable(dut, din)\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef setup_function(dut, din):\n cocotb.fork(Clock(dut.clk, CLK_PERIOD).start())\n dut.addr = din\n\n\[email protected]\ndef rst_function_test(dut):\n dut.rst = 1\n yield n_cycles_clock(dut, 10)\n if dut.data != 4294967295:\n raise TestFailure('Error rst,wrong data value = %s' % hex(int(dut.\n data.value)))\n dut.rst = 0\n\n\[email protected]\ndef check_lookuptable(dut, din):\n with open(LOOKUP_FILE) as fp:\n lines = fp.readlines()\n yield n_cycles_clock(dut, 1)\n selected_line = lines[din].replace(' ', '')\n print(selected_line)\n if dut.data.value.integer != int(selected_line, 16):\n raise TestFailure('Error data,wrong value = %s' % hex(int(dut.\n data.value)))\n\n\[email protected]\ndef n_cycles_clock(dut, n):\n for i in range(0, n):\n yield RisingEdge(dut.clk)\n yield FallingEdge(dut.clk)\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\[email protected]\ndef rst_function_test(dut):\n dut.rst = 1\n yield n_cycles_clock(dut, 10)\n if dut.data != 4294967295:\n raise TestFailure('Error rst,wrong data value = %s' % hex(int(dut.\n data.value)))\n dut.rst = 0\n\n\[email protected]\ndef check_lookuptable(dut, din):\n with open(LOOKUP_FILE) as fp:\n lines = fp.readlines()\n yield n_cycles_clock(dut, 1)\n selected_line = lines[din].replace(' ', '')\n print(selected_line)\n if dut.data.value.integer != int(selected_line, 16):\n raise TestFailure('Error data,wrong value = %s' % hex(int(dut.\n data.value)))\n\n\[email protected]\ndef n_cycles_clock(dut, n):\n for i in range(0, n):\n yield RisingEdge(dut.clk)\n yield FallingEdge(dut.clk)\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\[email protected]\ndef rst_function_test(dut):\n dut.rst = 1\n yield n_cycles_clock(dut, 10)\n if dut.data != 4294967295:\n raise TestFailure('Error rst,wrong data value = %s' % hex(int(dut.\n data.value)))\n dut.rst = 0\n\n\n<function token>\n\n\[email protected]\ndef n_cycles_clock(dut, n):\n for i in range(0, n):\n yield RisingEdge(dut.clk)\n yield FallingEdge(dut.clk)\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\[email protected]\ndef rst_function_test(dut):\n dut.rst = 1\n yield n_cycles_clock(dut, 10)\n if dut.data != 4294967295:\n raise TestFailure('Error rst,wrong data value = %s' % hex(int(dut.\n data.value)))\n dut.rst = 0\n\n\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
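As the comments in the testbench above explain, each yield hands control back to the simulator until the trigger fires. A minimal coroutine in the same pre-async cocotb style; the clk signal name is assumed:

# Minimal pre-async cocotb coroutine; 'dut.clk' is an assumed signal.
import cocotb
from cocotb.triggers import RisingEdge, Timer


@cocotb.coroutine
def settle(dut):
    yield Timer(10, 'ns')      # simulation advances 10 ns, then resumes here
    yield RisingEdge(dut.clk)  # paused again until the next rising clock edge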
98,451 |
269b2d783682f90673832ce6c13c4806be4628f0
|
from machine import ADC
import time
import machine
adc = ADC(0)
while True:
lightIntensity = adc.read()
print('read : ', lightIntensity)
time.sleep(1)
|
[
"from machine import ADC\nimport time\nimport machine\n\nadc = ADC(0)\n\nwhile True:\n lightIntensity = adc.read()\n print('read : ', lightIntensity)\n time.sleep(1)\n\n",
"from machine import ADC\nimport time\nimport machine\nadc = ADC(0)\nwhile True:\n lightIntensity = adc.read()\n print('read : ', lightIntensity)\n time.sleep(1)\n",
"<import token>\nadc = ADC(0)\nwhile True:\n lightIntensity = adc.read()\n print('read : ', lightIntensity)\n time.sleep(1)\n",
"<import token>\n<assignment token>\nwhile True:\n lightIntensity = adc.read()\n print('read : ', lightIntensity)\n time.sleep(1)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
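Turning the raw reading into a voltage needs the board's ADC range; a sketch assuming the common ESP8266 setup of a 10-bit ADC (0-1023) spanning 0-1.0 V at the pin. Check your board's voltage divider before trusting the scale:

# Illustrative only: assumes a 10-bit ADC reading 0-1.0 V at the pin.
raw = 512  # stand-in for adc.read()
volts = raw / 1023 * 1.0
print('{:.2f} V'.format(volts))  # 0.50 V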
98,452 |
852c9dc2d76a294885810ff3af814ce0c18c78f4
|
from django.conf.urls import url, include
from . import views
app_name = "rcv_list"
urlpatterns = [
url(r'^(?P<year>\d{4})/(?P<month>\d{1,2})/$', views.view_files, name="view_files"),
url(r'^$', views.view_dates, name="view_dates"),
url(r'^index/$', views.view_dates, name="index"),
url(r'^all/$', views.all_index, name="all_index"),
url(r'^download_rcv/(?P<rcv_filename>.+)$', views.download_rcv, name="download_rcv"),
url(r'^view_rcv/(?P<rcv_filename>.+)$', views.view_rcv, name="view_rcv"),
# url(r'^upload_file/$', views.upload_file, name="upload"),
url(r'^upload_files/$', views.upload_files, name="upload_files"),
url(r'^check_files_to_model/$', views.check_files_to_model, name='check_files_to_model'),
url(r'delete/$', views.delete_ajax, name='delete'),
url(r'^edit/(?P<filename>.+)$', views.edit_name, name="edit"),
url(r'^edit_list/$', views.view_edit_list, name="edit_list"),
url(r'^search_rcv/?$', views.search_rcv, name="search_rcv"),
url(r'^search_rcv/(?P<rcv>.+)$', views.search_rcv, name="searching_rcv"),
url(r'^edit_file_ajax/$', views.edit_file_ajax, name="edit_file_ajax"),
url(r'search_rcv_ajax', views.search_rcv_ajax, name="search_rcv_ajax"),
url(r'^download_rcv_by_id/(?P<rcv_id>\d+)$', views.download_rcv_by_id, name="download_rcv_by_id"),
url(r'^view_rcv_by_id/(?P<rcv_id>\d+)$', views.view_rcv_by_id, name="view_rcv_by_id"),
]
|
[
"from django.conf.urls import url, include\n\nfrom . import views\n\napp_name = \"rcv_list\"\nurlpatterns = [\n url(r'^(?P<year>\\d{4})/(?P<month>\\d{1,2})/$', views.view_files, name=\"view_files\"),\n url(r'^$', views.view_dates, name=\"view_dates\"),\n\turl(r'^index/$', views.view_dates, name=\"index\"),\n url(r'^all/$', views.all_index, name=\"all_index\"),\n url(r'^download_rcv/(?P<rcv_filename>.+)$', views.download_rcv, name=\"download_rcv\"),\n url(r'^view_rcv/(?P<rcv_filename>.+)$', views.view_rcv, name=\"view_rcv\"),\n # url(r'^upload_file/$', views.upload_file, name=\"upload\"),\n url(r'^upload_files/$', views.upload_files, name=\"upload_files\"),\n url(r'^check_files_to_model/$', views.check_files_to_model, name='check_files_to_model'),\n url(r'delete/$', views.delete_ajax, name='delete'),\n url(r'^edit/(?P<filename>.+)$', views.edit_name, name=\"edit\"),\n\n url(r'^edit_list/$', views.view_edit_list, name=\"edit_list\"),\n url(r'^search_rcv/?$', views.search_rcv, name=\"search_rcv\"),\n url(r'^search_rcv/(?P<rcv>.+)$', views.search_rcv, name=\"searching_rcv\"),\n\n url(r'^edit_file_ajax/$', views.edit_file_ajax, name=\"edit_file_ajax\"),\n url(r'search_rcv_ajax', views.search_rcv_ajax, name=\"search_rcv_ajax\"),\n\n url(r'^download_rcv_by_id/(?P<rcv_id>\\d+)$', views.download_rcv_by_id, name=\"download_rcv_by_id\"),\n url(r'^view_rcv_by_id/(?P<rcv_id>\\d+)$', views.view_rcv_by_id, name=\"view_rcv_by_id\"),\n]",
"from django.conf.urls import url, include\nfrom . import views\napp_name = 'rcv_list'\nurlpatterns = [url('^(?P<year>\\\\d{4})/(?P<month>\\\\d{1,2})/$', views.\n view_files, name='view_files'), url('^$', views.view_dates, name=\n 'view_dates'), url('^index/$', views.view_dates, name='index'), url(\n '^all/$', views.all_index, name='all_index'), url(\n '^download_rcv/(?P<rcv_filename>.+)$', views.download_rcv, name=\n 'download_rcv'), url('^view_rcv/(?P<rcv_filename>.+)$', views.view_rcv,\n name='view_rcv'), url('^upload_files/$', views.upload_files, name=\n 'upload_files'), url('^check_files_to_model/$', views.\n check_files_to_model, name='check_files_to_model'), url('delete/$',\n views.delete_ajax, name='delete'), url('^edit/(?P<filename>.+)$', views\n .edit_name, name='edit'), url('^edit_list/$', views.view_edit_list,\n name='edit_list'), url('^search_rcv/?$', views.search_rcv, name=\n 'search_rcv'), url('^search_rcv/(?P<rcv>.+)$', views.search_rcv, name=\n 'searching_rcv'), url('^edit_file_ajax/$', views.edit_file_ajax, name=\n 'edit_file_ajax'), url('search_rcv_ajax', views.search_rcv_ajax, name=\n 'search_rcv_ajax'), url('^download_rcv_by_id/(?P<rcv_id>\\\\d+)$', views.\n download_rcv_by_id, name='download_rcv_by_id'), url(\n '^view_rcv_by_id/(?P<rcv_id>\\\\d+)$', views.view_rcv_by_id, name=\n 'view_rcv_by_id')]\n",
"<import token>\napp_name = 'rcv_list'\nurlpatterns = [url('^(?P<year>\\\\d{4})/(?P<month>\\\\d{1,2})/$', views.\n view_files, name='view_files'), url('^$', views.view_dates, name=\n 'view_dates'), url('^index/$', views.view_dates, name='index'), url(\n '^all/$', views.all_index, name='all_index'), url(\n '^download_rcv/(?P<rcv_filename>.+)$', views.download_rcv, name=\n 'download_rcv'), url('^view_rcv/(?P<rcv_filename>.+)$', views.view_rcv,\n name='view_rcv'), url('^upload_files/$', views.upload_files, name=\n 'upload_files'), url('^check_files_to_model/$', views.\n check_files_to_model, name='check_files_to_model'), url('delete/$',\n views.delete_ajax, name='delete'), url('^edit/(?P<filename>.+)$', views\n .edit_name, name='edit'), url('^edit_list/$', views.view_edit_list,\n name='edit_list'), url('^search_rcv/?$', views.search_rcv, name=\n 'search_rcv'), url('^search_rcv/(?P<rcv>.+)$', views.search_rcv, name=\n 'searching_rcv'), url('^edit_file_ajax/$', views.edit_file_ajax, name=\n 'edit_file_ajax'), url('search_rcv_ajax', views.search_rcv_ajax, name=\n 'search_rcv_ajax'), url('^download_rcv_by_id/(?P<rcv_id>\\\\d+)$', views.\n download_rcv_by_id, name='download_rcv_by_id'), url(\n '^view_rcv_by_id/(?P<rcv_id>\\\\d+)$', views.view_rcv_by_id, name=\n 'view_rcv_by_id')]\n",
"<import token>\n<assignment token>\n"
] | false |
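With the app_name namespace above, the named routes resolve through Django's reverse(); this needs a configured project and URLconf, so treat the snippet as illustrative:

# Resolving two of the named routes above (requires Django settings/URLconf).
from django.urls import reverse

reverse('rcv_list:view_dates')                                      # -> '.../'
reverse('rcv_list:view_files', kwargs={'year': 2021, 'month': 3})   # -> '.../2021/3/'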
98,453 |
bd6a7585626ac29fee66ae41c28d60fef75d0fdb
|
import unittest
import console
import pep8
import os
import uuid
from io import StringIO
from unittest.mock import patch
from console import HBNBCommand
from models.engine.file_storage import FileStorage
from models import storage
class TestHBNB_prompt(unittest.TestCase):
def testprompt(self):
self.assertEqual("(hbnb) ", HBNBCommand.prompt)
def test_emptyline(self):
with patch("sys.stdout", new=StringIO()) as output:
self.assertFalse(HBNBCommand().onecmd(""))
self.assertEqual("", output.getvalue().strip())
class TestConsole(unittest.TestCase):
"""
Test cases for the console
"""
jsfile_test = 'consoletest.json'
err = {'CLS_MISS': "** class name missing **",
'CLS_NOEX': "** class doesn't exist **",
'ID_MISS': "** instance id missing **",
'ID_NOEX': "** no instance found **",
'NO_ATTR': "** attribute name missing **",
'NO_VAL': "** value missing **"}
cls_list = ['BaseModel',
'Amenity',
'City',
'Place',
'Review',
'State',
'User']
def tearDown(self):
"""set enviroment when testing is finished"""
# Empty objects in engine
FileStorage._FileStorage__objects = {}
# Remove file.json if exists
if os.path.exists("file.json"):
os.remove("file.json")
def test_quit(self):
"""Test for help quit command
"""
_help = 'Quit method to exit form cmd '
_help += 'program (Usage: quit)\n'
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("help quit")
self.assertEqual(f.getvalue(), _help)
    def test_help_EOF(self):  # renamed so the EOF exit test below does not shadow it
"""Test for help EOF command
"""
_help = 'EOF method to exit cmd program\n'
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("help EOF")
self.assertEqual(f.getvalue(), _help)
def test_all(self):
"""Test for help all command
"""
_help = "[Usage: all <class name>]or [Usage: all] or "\
"[Usage: <class name>.all()]\n"
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("help all")
self.assertEqual(f.getvalue(), _help)
def test_count(self):
"""Test for help count command
"""
_help = "[Usage: <class name>.count()]\n"
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("help count")
self.assertEqual(f.getvalue(), _help)
    def test_help_create(self):  # renamed so the create-command test below does not shadow it
"""Test for help create command
"""
_help = "[Usage: create <class name>]\n"
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("help create")
self.assertEqual(f.getvalue(), _help)
def test_destroy(self):
"""Test for help EOF command
"""
_help = "[Usage: destroy <class name> <id>] or "\
"[Usage: <class name>.destroy(<id>)]\n"
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("help destroy")
self.assertEqual(f.getvalue(), _help)
def test_show(self):
"""Test for help show command
"""
_help = "[Usage: show <class name> <id>] or "\
"[Usage: <class name>.show(<id>)]\n"
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("help show")
self.assertEqual(f.getvalue(), _help)
def test_update(self):
"""Test for help update command
"""
_help = "[Usage: update <class name> <id> <attribute name> "\
'"<attribute value>"] or [Usage: <class name>.update(<id>,'\
"<attribute name>, <attribute value>)]\n"
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("help update")
self.assertEqual(f.getvalue(), _help)
def test_help(self):
"""Test for help a command that doesnt exist
"""
_help = "*** No help on hello\n"
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("help hello")
self.assertEqual(f.getvalue(), _help)
def test_create(self):
"""Test for create command
"""
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("create")
self.assertEqual(f.getvalue().strip(), "** class name missing **")
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("create hello")
self.assertEqual(f.getvalue().strip(), "** class doesn't exist **")
        for _class in self.cls_list:
with patch('sys.stdout', new=StringIO()) as f:
command = "create" + " " + _class
HBNBCommand().onecmd(command)
_id = f.getvalue().strip()
key = _class + "." + _id
self.assertTrue(key in storage.all().keys())
def test_unknown(self):
""" Command that does not exist """
msg = "*** Unknown syntax: asd\n"
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("asd")
st = f.getvalue()
self.assertEqual(msg, st)
def test_prompt_string(self):
self.assertEqual("(hbnb) ", HBNBCommand.prompt)
def test_empty_line(self):
with patch("sys.stdout", new=StringIO()) as output:
self.assertFalse(HBNBCommand().onecmd(""))
self.assertEqual("", output.getvalue().strip())
def test_exits(self):
with patch("sys.stdout", new=StringIO()) as output:
self.assertTrue(HBNBCommand().onecmd("quit"))
def test_EOF(self):
with patch("sys.stdout", new=StringIO()) as output:
self.assertTrue(HBNBCommand().onecmd("EOF"))
@classmethod
def setUpClass(cls):
"""Set up for every test
"""
FileStorage._FileStorage__file_path = TestConsole.jsfile_test
    def tearDown(self):
        """tear down method for every test; consolidates the earlier
        tearDown, which this later definition would otherwise shadow
        """
        FileStorage._FileStorage__objects = {}
        if os.path.exists("file.json"):
            os.remove("file.json")
        if os.path.isfile(TestConsole.jsfile_test):
            os.remove(TestConsole.jsfile_test)
def test_console_pep8_conformance(self):
"""The Console code is PEP8 conformant?
"""
style = pep8.StyleGuide(quiet=True)
result = style.check_files(['console.py'])
self.assertEqual(result.total_errors, 0)
def test_console_test_pep8_conformance(self):
""" The Console Test code is PEP8 conformant?
"""
pep8style = pep8.StyleGuide(quiet=True)
result = pep8style.check_files(
['tests/test_console.py'])
self.assertEqual(result.total_errors, 0)
def test_console_documented(self):
"""Console has some documentation?
"""
        self.assertTrue(len(HBNBCommand.__doc__) >= 1)
def test_create_missing_class(self):
correct = "** class name missing **"
with patch("sys.stdout", new=StringIO()) as output:
self.assertFalse(HBNBCommand().onecmd("create"))
self.assertEqual(correct, output.getvalue().strip())
def test_create_invalid_class(self):
correct = "** class doesn't exist **"
with patch("sys.stdout", new=StringIO()) as output:
self.assertFalse(HBNBCommand().onecmd("create MyModel"))
self.assertEqual(correct, output.getvalue().strip())
def test_create_object(self):
with patch("sys.stdout", new=StringIO()) as output:
self.assertFalse(HBNBCommand().onecmd("create BaseModel"))
self.assertLess(0, len(output.getvalue().strip()))
testKey = "BaseModel.{}".format(output.getvalue().strip())
self.assertIn(testKey, storage.all().keys())
with patch("sys.stdout", new=StringIO()) as output:
self.assertFalse(HBNBCommand().onecmd("create User"))
self.assertLess(0, len(output.getvalue().strip()))
testKey = "User.{}".format(output.getvalue().strip())
self.assertIn(testKey, storage.all().keys())
with patch("sys.stdout", new=StringIO()) as output:
self.assertFalse(HBNBCommand().onecmd("create State"))
self.assertLess(0, len(output.getvalue().strip()))
testKey = "State.{}".format(output.getvalue().strip())
self.assertIn(testKey, storage.all().keys())
with patch("sys.stdout", new=StringIO()) as output:
self.assertFalse(HBNBCommand().onecmd("create City"))
self.assertLess(0, len(output.getvalue().strip()))
testKey = "City.{}".format(output.getvalue().strip())
self.assertIn(testKey, storage.all().keys())
with patch("sys.stdout", new=StringIO()) as output:
self.assertFalse(HBNBCommand().onecmd("create Amenity"))
self.assertLess(0, len(output.getvalue().strip()))
testKey = "Amenity.{}".format(output.getvalue().strip())
self.assertIn(testKey, storage.all().keys())
with patch("sys.stdout", new=StringIO()) as output:
self.assertFalse(HBNBCommand().onecmd("create Place"))
self.assertLess(0, len(output.getvalue().strip()))
testKey = "Place.{}".format(output.getvalue().strip())
self.assertIn(testKey, storage.all().keys())
with patch("sys.stdout", new=StringIO()) as output:
self.assertFalse(HBNBCommand().onecmd("create Review"))
self.assertLess(0, len(output.getvalue().strip()))
testKey = "Review.{}".format(output.getvalue().strip())
self.assertIn(testKey, storage.all().keys())
if __name__ == "__main__":
unittest.main()
|
[
"import unittest\nimport console\nimport pep8\nimport os\nimport uuid\nfrom io import StringIO\nfrom unittest.mock import patch\nfrom console import HBNBCommand\nfrom models.engine.file_storage import FileStorage\nfrom models import storage\n\n\nclass TestHBNB_prompt(unittest.TestCase):\n \n def testprompt(self):\n self.assertEqual(\"(hbnb) \", HBNBCommand.prompt)\n\n def test_emptyline(self):\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(\"\"))\n self.assertEqual(\"\", output.getvalue().strip())\n\nclass TestConsole(unittest.TestCase):\n \"\"\"\n Test cases for the console\n \"\"\"\n jsfile_test = 'consoletest.json'\n err = {'CLS_MISS': \"** class name missing **\",\n 'CLS_NOEX': \"** class doesn't exist **\",\n 'ID_MISS': \"** instance id missing **\",\n 'ID_NOEX': \"** no instance found **\",\n 'NO_ATTR': \"** attribute name missing **\",\n 'NO_VAL': \"** value missing **\"}\n cls_list = ['BaseModel',\n 'Amenity',\n 'City',\n 'Place',\n 'Review',\n 'State',\n 'User']\n\n def tearDown(self):\n \"\"\"set enviroment when testing is finished\"\"\"\n # Empty objects in engine\n FileStorage._FileStorage__objects = {}\n # Remove file.json if exists\n if os.path.exists(\"file.json\"):\n os.remove(\"file.json\")\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help quit\")\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help EOF\")\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = \"[Usage: all <class name>]or [Usage: all] or \"\\\n \"[Usage: <class name>.all()]\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help all\")\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = \"[Usage: <class name>.count()]\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help count\")\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = \"[Usage: create <class name>]\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help create\")\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = \"[Usage: destroy <class name> <id>] or \"\\\n \"[Usage: <class name>.destroy(<id>)]\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help destroy\")\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = \"[Usage: show <class name> <id>] or \"\\\n \"[Usage: <class name>.show(<id>)]\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help show\")\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"[Usage: update <class name> <id> <attribute name> \"\\\n '\"<attribute value>\"] or [Usage: <class name>.update(<id>,'\\\n \"<attribute name>, <attribute value>)]\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help update\")\n self.assertEqual(f.getvalue(), _help)\n\n 
def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = \"*** No help on hello\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help hello\")\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create\")\n self.assertEqual(f.getvalue().strip(), \"** class name missing **\")\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create hello\")\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = \"create\" + \" \" + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + \".\" + _id\n self.assertTrue(key in storage.all().keys())\n\n def test_unknown(self):\n \"\"\" Command that does not exist \"\"\"\n msg = \"*** Unknown syntax: asd\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"asd\")\n st = f.getvalue()\n self.assertEqual(msg, st)\n\n def test_prompt_string(self):\n self.assertEqual(\"(hbnb) \", HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(\"\"))\n self.assertEqual(\"\", output.getvalue().strip())\n\n def test_exits(self):\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd(\"quit\"))\n\n def test_EOF(self):\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd(\"EOF\"))\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(\n ['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n (len(HBNBCommand.__doc__) >= 1)\n\n def test_create_missing_class(self):\n correct = \"** class name missing **\"\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(\"create\"))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(\"create MyModel\"))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(\"create BaseModel\"))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = \"BaseModel.{}\".format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(\"create User\"))\n self.assertLess(0, 
len(output.getvalue().strip()))\n testKey = \"User.{}\".format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(\"create State\"))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = \"State.{}\".format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(\"create City\"))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = \"City.{}\".format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(\"create Amenity\"))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = \"Amenity.{}\".format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(\"create Place\"))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = \"Place.{}\".format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch(\"sys.stdout\", new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(\"create Review\"))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = \"Review.{}\".format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import unittest\nimport console\nimport pep8\nimport os\nimport uuid\nfrom io import StringIO\nfrom unittest.mock import patch\nfrom console import HBNBCommand\nfrom models.engine.file_storage import FileStorage\nfrom models import storage\n\n\nclass TestHBNB_prompt(unittest.TestCase):\n\n def testprompt(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_emptyline(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n\nclass TestConsole(unittest.TestCase):\n \"\"\"\n Test cases for the console\n \"\"\"\n jsfile_test = 'consoletest.json'\n err = {'CLS_MISS': '** class name missing **', 'CLS_NOEX':\n \"** class doesn't exist **\", 'ID_MISS': '** instance id missing **',\n 'ID_NOEX': '** no instance found **', 'NO_ATTR':\n '** attribute name missing **', 'NO_VAL': '** value missing **'}\n cls_list = ['BaseModel', 'Amenity', 'City', 'Place', 'Review', 'State',\n 'User']\n\n def tearDown(self):\n \"\"\"set enviroment when testing is finished\"\"\"\n FileStorage._FileStorage__objects = {}\n if os.path.exists('file.json'):\n os.remove('file.json')\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"\"\"[Usage: update <class name> <id> <attribute name> \"<attribute value>\"] or [Usage: <class name>.update(<id>,<attribute name>, <attribute value>)]\n\"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help update')\n self.assertEqual(f.getvalue(), _help)\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with 
patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n\n def test_unknown(self):\n \"\"\" Command that does not exist \"\"\"\n msg = '*** Unknown syntax: asd\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('asd')\n st = f.getvalue()\n self.assertEqual(msg, st)\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n\n def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('EOF'))\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as 
output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestHBNB_prompt(unittest.TestCase):\n\n def testprompt(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_emptyline(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n\nclass TestConsole(unittest.TestCase):\n \"\"\"\n Test cases for the console\n \"\"\"\n jsfile_test = 'consoletest.json'\n err = {'CLS_MISS': '** class name missing **', 'CLS_NOEX':\n \"** class doesn't exist **\", 'ID_MISS': '** instance id missing **',\n 'ID_NOEX': '** no instance found **', 'NO_ATTR':\n '** attribute name missing **', 'NO_VAL': '** value missing **'}\n cls_list = ['BaseModel', 'Amenity', 'City', 'Place', 'Review', 'State',\n 'User']\n\n def tearDown(self):\n \"\"\"set enviroment when testing is finished\"\"\"\n FileStorage._FileStorage__objects = {}\n if os.path.exists('file.json'):\n os.remove('file.json')\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"\"\"[Usage: update <class name> <id> <attribute name> \"<attribute value>\"] or [Usage: <class name>.update(<id>,<attribute name>, <attribute value>)]\n\"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help update')\n self.assertEqual(f.getvalue(), _help)\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as 
f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n\n def test_unknown(self):\n \"\"\" Command that does not exist \"\"\"\n msg = '*** Unknown syntax: asd\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('asd')\n st = f.getvalue()\n self.assertEqual(msg, st)\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n\n def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('EOF'))\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with 
patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestHBNB_prompt(unittest.TestCase):\n\n def testprompt(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_emptyline(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n\nclass TestConsole(unittest.TestCase):\n \"\"\"\n Test cases for the console\n \"\"\"\n jsfile_test = 'consoletest.json'\n err = {'CLS_MISS': '** class name missing **', 'CLS_NOEX':\n \"** class doesn't exist **\", 'ID_MISS': '** instance id missing **',\n 'ID_NOEX': '** no instance found **', 'NO_ATTR':\n '** attribute name missing **', 'NO_VAL': '** value missing **'}\n cls_list = ['BaseModel', 'Amenity', 'City', 'Place', 'Review', 'State',\n 'User']\n\n def tearDown(self):\n \"\"\"set enviroment when testing is finished\"\"\"\n FileStorage._FileStorage__objects = {}\n if os.path.exists('file.json'):\n os.remove('file.json')\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"\"\"[Usage: update <class name> <id> <attribute name> \"<attribute value>\"] or [Usage: <class name>.update(<id>,<attribute name>, <attribute value>)]\n\"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help update')\n self.assertEqual(f.getvalue(), _help)\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as 
f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n\n def test_unknown(self):\n \"\"\" Command that does not exist \"\"\"\n msg = '*** Unknown syntax: asd\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('asd')\n st = f.getvalue()\n self.assertEqual(msg, st)\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n\n def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('EOF'))\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with 
patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
"<import token>\n\n\nclass TestHBNB_prompt(unittest.TestCase):\n\n def testprompt(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n <function token>\n\n\nclass TestConsole(unittest.TestCase):\n \"\"\"\n Test cases for the console\n \"\"\"\n jsfile_test = 'consoletest.json'\n err = {'CLS_MISS': '** class name missing **', 'CLS_NOEX':\n \"** class doesn't exist **\", 'ID_MISS': '** instance id missing **',\n 'ID_NOEX': '** no instance found **', 'NO_ATTR':\n '** attribute name missing **', 'NO_VAL': '** value missing **'}\n cls_list = ['BaseModel', 'Amenity', 'City', 'Place', 'Review', 'State',\n 'User']\n\n def tearDown(self):\n \"\"\"set enviroment when testing is finished\"\"\"\n FileStorage._FileStorage__objects = {}\n if os.path.exists('file.json'):\n os.remove('file.json')\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"\"\"[Usage: update <class name> <id> <attribute name> \"<attribute value>\"] or [Usage: <class name>.update(<id>,<attribute name>, <attribute value>)]\n\"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help update')\n self.assertEqual(f.getvalue(), _help)\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n 
HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n\n def test_unknown(self):\n \"\"\" Command that does not exist \"\"\"\n msg = '*** Unknown syntax: asd\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('asd')\n st = f.getvalue()\n self.assertEqual(msg, st)\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n\n def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('EOF'))\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 
'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
"<import token>\n\n\nclass TestHBNB_prompt(unittest.TestCase):\n <function token>\n <function token>\n\n\nclass TestConsole(unittest.TestCase):\n \"\"\"\n Test cases for the console\n \"\"\"\n jsfile_test = 'consoletest.json'\n err = {'CLS_MISS': '** class name missing **', 'CLS_NOEX':\n \"** class doesn't exist **\", 'ID_MISS': '** instance id missing **',\n 'ID_NOEX': '** no instance found **', 'NO_ATTR':\n '** attribute name missing **', 'NO_VAL': '** value missing **'}\n cls_list = ['BaseModel', 'Amenity', 'City', 'Place', 'Review', 'State',\n 'User']\n\n def tearDown(self):\n \"\"\"set enviroment when testing is finished\"\"\"\n FileStorage._FileStorage__objects = {}\n if os.path.exists('file.json'):\n os.remove('file.json')\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"\"\"[Usage: update <class name> <id> <attribute name> \"<attribute value>\"] or [Usage: <class name>.update(<id>,<attribute name>, <attribute value>)]\n\"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help update')\n self.assertEqual(f.getvalue(), _help)\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n 
self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n\n def test_unknown(self):\n \"\"\" Command that does not exist \"\"\"\n msg = '*** Unknown syntax: asd\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('asd')\n st = f.getvalue()\n self.assertEqual(msg, st)\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n\n def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('EOF'))\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 
'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n \"\"\"\n Test cases for the console\n \"\"\"\n jsfile_test = 'consoletest.json'\n err = {'CLS_MISS': '** class name missing **', 'CLS_NOEX':\n \"** class doesn't exist **\", 'ID_MISS': '** instance id missing **',\n 'ID_NOEX': '** no instance found **', 'NO_ATTR':\n '** attribute name missing **', 'NO_VAL': '** value missing **'}\n cls_list = ['BaseModel', 'Amenity', 'City', 'Place', 'Review', 'State',\n 'User']\n\n def tearDown(self):\n \"\"\"set enviroment when testing is finished\"\"\"\n FileStorage._FileStorage__objects = {}\n if os.path.exists('file.json'):\n os.remove('file.json')\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"\"\"[Usage: update <class name> <id> <attribute name> \"<attribute value>\"] or [Usage: <class name>.update(<id>,<attribute name>, <attribute value>)]\n\"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help update')\n self.assertEqual(f.getvalue(), _help)\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in 
self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n\n def test_unknown(self):\n \"\"\" Command that does not exist \"\"\"\n msg = '*** Unknown syntax: asd\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('asd')\n st = f.getvalue()\n self.assertEqual(msg, st)\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n\n def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('EOF'))\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', 
new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n jsfile_test = 'consoletest.json'\n err = {'CLS_MISS': '** class name missing **', 'CLS_NOEX':\n \"** class doesn't exist **\", 'ID_MISS': '** instance id missing **',\n 'ID_NOEX': '** no instance found **', 'NO_ATTR':\n '** attribute name missing **', 'NO_VAL': '** value missing **'}\n cls_list = ['BaseModel', 'Amenity', 'City', 'Place', 'Review', 'State',\n 'User']\n\n def tearDown(self):\n \"\"\"set enviroment when testing is finished\"\"\"\n FileStorage._FileStorage__objects = {}\n if os.path.exists('file.json'):\n os.remove('file.json')\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"\"\"[Usage: update <class name> <id> <attribute name> \"<attribute value>\"] or [Usage: <class name>.update(<id>,<attribute name>, <attribute value>)]\n\"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help update')\n self.assertEqual(f.getvalue(), _help)\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with 
patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n\n def test_unknown(self):\n \"\"\" Command that does not exist \"\"\"\n msg = '*** Unknown syntax: asd\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('asd')\n st = f.getvalue()\n self.assertEqual(msg, st)\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n\n def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('EOF'))\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as 
output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
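The `err` mapping and `cls_list` in this step pin down the console's exact error strings. The console itself is not part of this record, so the following is only a hedged sketch of the `do_create` validation those assertions imply, with the actual instantiation stubbed out:

```python
CLASSES = {'BaseModel', 'User', 'State', 'City', 'Amenity', 'Place', 'Review'}


def do_create(line):
    """Validate `create <class name>` the way the assertions expect."""
    if not line:
        print('** class name missing **')
    elif line.split()[0] not in CLASSES:
        print("** class doesn't exist **")
    else:
        # The real console would instantiate, save, and print the new id.
        print('<new-instance-id>')


do_create('')       # -> ** class name missing **
do_create('hello')  # -> ** class doesn't exist **
```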
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def tearDown(self):\n \"\"\"set enviroment when testing is finished\"\"\"\n FileStorage._FileStorage__objects = {}\n if os.path.exists('file.json'):\n os.remove('file.json')\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"\"\"[Usage: update <class name> <id> <attribute name> \"<attribute value>\"] or [Usage: <class name>.update(<id>,<attribute name>, <attribute value>)]\n\"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help update')\n self.assertEqual(f.getvalue(), _help)\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n\n def test_unknown(self):\n \"\"\" Command that does not exist \"\"\"\n msg = '*** Unknown syntax: asd\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('asd')\n st = f.getvalue()\n self.assertEqual(msg, st)\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n\n def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('EOF'))\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 
'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
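Note that the loop in `test_create` iterates `self.clis`, while the attribute the class actually defines is `cls_list`, so as written that test raises `AttributeError` before asserting anything. The per-class blocks in `test_create_object` are also pure copy-paste; `unittest`'s `subTest` expresses the same checks as one loop while still reporting each class separately. A sketch, with the console and storage replaced by a trivial `print` stub:

```python
import unittest
from io import StringIO
from unittest.mock import patch

CLS_LIST = ['BaseModel', 'User', 'State', 'City', 'Amenity', 'Place', 'Review']


class TestCreatePerClass(unittest.TestCase):
    def test_create_each_class(self):
        for name in CLS_LIST:
            with self.subTest(cls=name):
                with patch('sys.stdout', new=StringIO()) as out:
                    print(f'{name}.fake-id')  # stand-in for onecmd('create ...')
                self.assertTrue(out.getvalue().startswith(name + '.'))


if __name__ == '__main__':
    unittest.main()
```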
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def tearDown(self):\n \"\"\"set enviroment when testing is finished\"\"\"\n FileStorage._FileStorage__objects = {}\n if os.path.exists('file.json'):\n os.remove('file.json')\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"\"\"[Usage: update <class name> <id> <attribute name> \"<attribute value>\"] or [Usage: <class name>.update(<id>,<attribute name>, <attribute value>)]\n\"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help update')\n self.assertEqual(f.getvalue(), _help)\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n\n def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('EOF'))\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, 
len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
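The style-conformance tests import `pep8`, a package that was renamed `pycodestyle` in 2016; the old name still works where it is installed, but new code should target the successor. An equivalent check, assuming the third-party `pycodestyle` package is available:

```python
import pycodestyle  # successor to the deprecated `pep8` package


def count_style_errors(paths):
    """Return the number of PEP 8 violations across the given files."""
    style = pycodestyle.StyleGuide(quiet=True)
    return style.check_files(paths).total_errors


# e.g. assert count_style_errors(['console.py']) == 0
```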
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def tearDown(self):\n \"\"\"set enviroment when testing is finished\"\"\"\n FileStorage._FileStorage__objects = {}\n if os.path.exists('file.json'):\n os.remove('file.json')\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"\"\"[Usage: update <class name> <id> <attribute name> \"<attribute value>\"] or [Usage: <class name>.update(<id>,<attribute name>, <attribute value>)]\n\"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help update')\n self.assertEqual(f.getvalue(), _help)\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, 
storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
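`FileStorage._FileStorage__file_path` in `setUpClass` works because of Python's name mangling: a class attribute written as `__file_path` is stored under `_ClassName__file_path`, which lets the tests redirect persistence to a throwaway file. A minimal demonstration with a hypothetical stand-in class, not the record's `FileStorage`:

```python
class FileStorageStandIn:
    __file_path = 'file.json'  # mangled to _FileStorageStandIn__file_path

    @classmethod
    def path(cls):
        return cls.__file_path  # also mangled at compile time


# Tests can still reach the "private" attribute through the mangled name:
FileStorageStandIn._FileStorageStandIn__file_path = 'consoletest.json'
assert FileStorageStandIn.path() == 'consoletest.json'
```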
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n\n def test_update(self):\n \"\"\"Test for help update command\n \"\"\"\n _help = \"\"\"[Usage: update <class name> <id> <attribute name> \"<attribute value>\"] or [Usage: <class name>.update(<id>,<attribute name>, <attribute value>)]\n\"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help update')\n self.assertEqual(f.getvalue(), _help)\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, 
storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
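The prompt, empty-line, and exit tests pin down three `cmd.Cmd` behaviors: `prompt` is a plain class attribute, an empty line should do nothing (the `cmd` default repeats the last command, so consoles override `emptyline`), and a truthy return from a handler stops `cmdloop`. A minimal sketch consistent with those assertions:

```python
import cmd


class MiniConsole(cmd.Cmd):
    prompt = '(hbnb) '

    def emptyline(self):
        return False  # do nothing instead of repeating the last command

    def do_quit(self, line):
        return True   # a truthy return value terminates cmdloop()

    do_EOF = do_quit  # Ctrl-D behaves like quit


assert MiniConsole.prompt == '(hbnb) '
assert MiniConsole().onecmd('') is False
assert MiniConsole().onecmd('quit') is True
```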
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def test_quit(self):\n \"\"\"Test for help quit command\n \"\"\"\n _help = 'Quit method to exit form cmd '\n _help += 'program (Usage: quit)\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help quit')\n self.assertEqual(f.getvalue(), _help)\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, 
storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
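`test_console_documented` in these steps contains a bare `self.assertTrue` (no call) and a bare `len(HBNBCommand.__doc__) >= 1` expression; both evaluate a value and discard it, so the test can never fail. A corrected version of that documentation check, shown against a dummy class:

```python
import unittest


class SomethingDocumented:
    """Example class with a docstring."""


class TestDocs(unittest.TestCase):
    def test_documented(self):
        # Actually assert, instead of evaluating and discarding the result.
        self.assertIsNotNone(SomethingDocumented.__doc__)
        self.assertGreaterEqual(len(SomethingDocumented.__doc__), 1)
```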
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_test_pep8_conformance(self):\n \"\"\" The Console Test code is PEP8 conformant?\n \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['tests/test_console.py'])\n self.assertEqual(result.total_errors, 0)\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, 
storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
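Every create test ends by looking up `'<ClassName>.<id>'` in `storage.all()`, which implies the storage layer keys objects by class name plus id. A stand-in sketch of that convention (an assumed in-memory substitute, not the record's `FileStorage`):

```python
class InMemoryStorage:
    """Keeps objects under '<ClassName>.<id>' keys, as the tests assume."""

    def __init__(self):
        self._objects = {}

    def new(self, obj):
        self._objects[f'{type(obj).__name__}.{obj.id}'] = obj

    def all(self):
        return self._objects


class BaseModel:
    def __init__(self, id):
        self.id = id


storage = InMemoryStorage()
storage.new(BaseModel('1234'))
assert 'BaseModel.1234' in storage.all()
```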
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n\n def test_count(self):\n \"\"\"Test for help count command\n \"\"\"\n _help = '[Usage: <class name>.count()]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help count')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n 
self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
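These steps define `test_EOF`, `test_create`, and `tearDown` twice each; inside a class body the later `def` silently replaces the earlier one, so only the last variant of each ever runs. A small demonstration of the shadowing:

```python
class Shadowed:
    def test_create(self):
        return 'help-text variant'

    def test_create(self):  # noqa: F811 -- redefinition shadows the first
        return 'command variant'


assert Shadowed().test_create() == 'command variant'
# Unique names (test_help_create, test_create_errors, ...) keep both
# variants in the suite.
```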
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_object(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create BaseModel'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'BaseModel.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create User'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'User.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create State'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'State.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create City'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'City.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Amenity'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Amenity.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Place'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Place.{}'.format(output.getvalue().strip())\n self.assertIn(testKey, storage.all().keys())\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create Review'))\n self.assertLess(0, len(output.getvalue().strip()))\n testKey = 'Review.{}'.format(output.getvalue().strip())\n 
self.assertIn(testKey, storage.all().keys())\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up for every test\n \"\"\"\n FileStorage._FileStorage__file_path = TestConsole.jsfile_test\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_help(self):\n \"\"\"Test for help a command that doesnt exist\n \"\"\"\n _help = '*** No help on hello\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help hello')\n self.assertEqual(f.getvalue(), _help)\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n <function token>\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n\n def test_prompt_string(self):\n self.assertEqual('(hbnb) ', HBNBCommand.prompt)\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n <function token>\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n\n def test_show(self):\n \"\"\"Test for help show command\n \"\"\"\n _help = (\n '[Usage: show <class name> <id>] or [Usage: <class name>.show(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help show')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' 
+ _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n <function token>\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n <function token>\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n\n def test_console_pep8_conformance(self):\n \"\"\"The Console code is PEP8 conformant?\n \"\"\"\n style = pep8.StyleGuide(quiet=True)\n result = style.check_files(['console.py'])\n self.assertEqual(result.total_errors, 0)\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n\n def test_destroy(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = (\n '[Usage: destroy <class name> <id>] or [Usage: <class name>.destroy(<id>)]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help destroy')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n <function token>\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n <function token>\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n\n def test_empty_line(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd(''))\n self.assertEqual('', output.getvalue().strip())\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n <function token>\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n <function token>\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n <function token>\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n <function token>\n\n def tearDown(self):\n \"\"\" tear down method for every test\n \"\"\"\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)\n <function token>\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n\n def test_create(self):\n \"\"\"Test for help create command\n \"\"\"\n _help = '[Usage: create <class name>]\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help create')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n <function token>\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n <function token>\n\n def test_exits(self):\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertTrue(HBNBCommand().onecmd('quit'))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n\n def test_all(self):\n \"\"\"Test for help all command\n \"\"\"\n _help = (\n '[Usage: all <class name>]or [Usage: all] or [Usage: <class name>.all()]\\n'\n )\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help all')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_console_documented(self):\n \"\"\"Console has some documentation?\n \"\"\"\n self.assertTrue\n self.assertTrue\n len(HBNBCommand.__doc__) >= 1\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def test_EOF(self):\n \"\"\"Test for help EOF command\n \"\"\"\n _help = 'EOF method to exit cmd program\\n'\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('help EOF')\n self.assertEqual(f.getvalue(), _help)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n\n def test_create_invalid_class(self):\n correct = \"** class doesn't exist **\"\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create MyModel'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create_missing_class(self):\n correct = '** class name missing **'\n with patch('sys.stdout', new=StringIO()) as output:\n self.assertFalse(HBNBCommand().onecmd('create'))\n self.assertEqual(correct, output.getvalue().strip())\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_create(self):\n \"\"\"Test for create command\n \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create')\n self.assertEqual(f.getvalue().strip(), '** class name missing **')\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd('create hello')\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = 'create' + ' ' + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + '.' + _id\n self.assertTrue(key in storage.all().keys())\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestConsole(unittest.TestCase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<code token>\n"
] | false |
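The steps above exercise an HBNB console (a cmd.Cmd subclass) by patching sys.stdout with a StringIO and asserting on whatever onecmd() printed. A minimal, self-contained sketch of that same capture pattern follows; GreetCommand and its tests are illustrative stand-ins, not part of the row's codebase.

import cmd
import unittest
from io import StringIO
from unittest.mock import patch


class GreetCommand(cmd.Cmd):
    """Toy interpreter standing in for the HBNB console."""
    prompt = '(demo) '

    def do_greet(self, line):
        print('hello ' + (line or 'world'))

    def do_quit(self, line):
        return True  # a truthy return value stops cmdloop()


class TestGreetCommand(unittest.TestCase):
    def test_greet(self):
        # print() looks up sys.stdout at call time, so the patch captures it
        with patch('sys.stdout', new=StringIO()) as output:
            self.assertFalse(GreetCommand().onecmd('greet HBNB'))
        self.assertEqual('hello HBNB', output.getvalue().strip())

    def test_quit(self):
        with patch('sys.stdout', new=StringIO()):
            self.assertTrue(GreetCommand().onecmd('quit'))


if __name__ == '__main__':
    unittest.main()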
98,454 |
db5c3c9facdc81fddce588c12c312e7f0b4f9adf
|
# Generated by Django 3.1 on 2020-08-18 16:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('budget', '0002_budgets_bamount'),
]
operations = [
migrations.AlterField(
model_name='budgets',
name='bID',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='expenses',
name='eID',
field=models.AutoField(primary_key=True, serialize=False),
),
]
|
[
"# Generated by Django 3.1 on 2020-08-18 16:52\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('budget', '0002_budgets_bamount'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='budgets',\n name='bID',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n migrations.AlterField(\n model_name='expenses',\n name='eID',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('budget', '0002_budgets_bamount')]\n operations = [migrations.AlterField(model_name='budgets', name='bID',\n field=models.AutoField(primary_key=True, serialize=False)),\n migrations.AlterField(model_name='expenses', name='eID', field=\n models.AutoField(primary_key=True, serialize=False))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('budget', '0002_budgets_bamount')]\n operations = [migrations.AlterField(model_name='budgets', name='bID',\n field=models.AutoField(primary_key=True, serialize=False)),\n migrations.AlterField(model_name='expenses', name='eID', field=\n models.AutoField(primary_key=True, serialize=False))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
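Row 98,454's migration converts bID and eID into explicit AutoField primary keys. For orientation, here is a hedged sketch of the budget/models.py that would make Django emit such an AlterField pair; the bAmount field is only inferred from the 0002_budgets_bamount dependency, and any other fields on the real models are unknown.

from django.db import models


class Budgets(models.Model):
    # the first AlterField in the migration above targets this column
    bID = models.AutoField(primary_key=True)
    # guessed from the '0002_budgets_bamount' migration name
    bAmount = models.IntegerField(default=0)


class Expenses(models.Model):
    # the second AlterField targets this column
    eID = models.AutoField(primary_key=True)

Running python manage.py makemigrations budget against models shaped like these would regenerate an equivalent migration.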
98,455 |
14fdbe545962ac23a77e6b50e6bfa1d9acf401c4
|
import pandas as pd
import numpy as np
terrorism = pd.read_csv("terrorismData.csv")
df = terrorism.copy()
df = df[(df.Day >= 10)]
df = df[(df.Day <= 20)]
print(df.shape[0])
|
[
"import pandas as pd\r\nimport numpy as np\r\nterrorism = pd.read_csv(\"terrorismData.csv\")\r\ndf = terrorism.copy()\r\ndf = df[(df.Day >= 10)]\r\ndf = df[(df.Day <= 20)]\r\nprint(df.shape[0])",
"import pandas as pd\nimport numpy as np\nterrorism = pd.read_csv('terrorismData.csv')\ndf = terrorism.copy()\ndf = df[df.Day >= 10]\ndf = df[df.Day <= 20]\nprint(df.shape[0])\n",
"<import token>\nterrorism = pd.read_csv('terrorismData.csv')\ndf = terrorism.copy()\ndf = df[df.Day >= 10]\ndf = df[df.Day <= 20]\nprint(df.shape[0])\n",
"<import token>\n<assignment token>\nprint(df.shape[0])\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
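The row above keeps Day values between 10 and 20 with two chained masks. pandas' Series.between, which is inclusive on both ends by default, expresses the same filter in one step; the tiny frame below is a hypothetical stand-in, since terrorismData.csv is not part of this dump.

import pandas as pd

# stand-in for terrorismData.csv; only the Day column matters here
df = pd.DataFrame({'Day': [5, 10, 15, 20, 25]})

# one inclusive mask instead of the chained >= 10 and <= 20 selections
print(df[df.Day.between(10, 20)].shape[0])  # prints 3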
98,456 |
2bbaca1c1ead93e1c25ad6d66d8d46cf2fd93d8e
|
COMPUTATION_NODE_PARAM_NAME = {
'name': 'name',
'description': 'Computation Node name',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'path'
}
COMPUTATION_NODE_PARAM_ADDRESS = {
'name': 'address',
'description': 'Computation Node address',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
}
COMPUTATION_NODE_PARAM_PORT = {
'name': 'port',
'description': 'Computation Node port',
'required': True,
'allowMultiple': False,
'dataType': 'int',
'paramType': 'query'
}
COMPUTATION_NODE_NOT_FOUND_RESPONSE = {
'code': 404,
'message': 'Computation node could not be found'
}
COMPUTATION_NODE_PUT_NOT_FOUND_RESPONSE = {
'code': 406,
'message': 'Computation node could not be found'
}
COMPUTATION_NODE_FETCHED_RESPONSE = {
'code': 200,
'message': 'Node info fetched successfully'
}
CONSTRAINTS_FETCHED_RESPONSE = {
'code': 200,
'message': 'Constraints fetched successfully'
}
POWER_USAGE_FETCHED_RESPONSE = {
'code': 200,
'message': 'Power usage fetched successfully'
}
COMPUTATION_NODE_ADDED_RESPONSE = {
'code': 201,
'message': 'Node added successfully'
}
ALL_COMPUTATION_NODES_SORT_PARAM = {
'name': 'sort',
'description': 'Records sorting order',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
}
ALL_COMPUTATION_NODES_FILTER_PARAM = {
'name': 'filter',
'description': 'Node name filter',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
}
ALL_COMPUTATION_NODES_PAGINATION_PARAM = {
'name': 'pagination',
'description': 'Database pagination',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
}
ALL_COMPUTATION_NODES_NODE_ADDRESS_PARAM = {
'name': 'address',
'description': 'Address (domain, ip) of computation node',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
}
DEVICE_IDENTIFIER_PARAM = {
'name': 'device_id',
'description': 'Device identifier',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'path'
}
NODE_AND_DEVICE_PARAMS = [DEVICE_IDENTIFIER_PARAM, COMPUTATION_NODE_PARAM_NAME]
DEVICE_POWER_LIMIT_PARAM = {
'name': 'power_limit',
'description': 'Power limit',
'required': True,
'allowMultiple': False,
'dataType': 'int',
'paramType': 'query'
}
DEVICE_SOFT_LIMIT_PARAM = {
'name': 'soft_limit',
'description': 'Soft limit',
'required': True,
'allowMultiple': False,
'dataType': 'int',
'paramType': 'query'
}
STATISTICS_INTERVAL_PARAM = {
'name': 'statistics_interval',
'description': 'Statistics gathering interval in minutes',
'required': True,
'allowMultiple': False,
'dataType': 'int',
'paramType': 'query'
}
DATETIME_PARAM = {
'name': 'date_time',
'description': 'Date and time',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'path'
}
DATETIME_PARAM_BEGIN = {
'name': 'date_time_begin',
'description': 'Date and time',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
}
DATETIME_PARAM_END = {
'name': 'date_time_end',
'description': 'Date and time',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
}
POWER_USAGE_PARAM = {
'name': 'power_usage',
'description': 'Power usage',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
}
RULE_TYPE_PARAM = {
'name': 'rule_type',
'description': 'Rule type',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
}
RULE_PARAMS_PARAM = {
'name': 'rule_params',
'description': 'Rule parameters',
'required': True,
'allowMultiple': False,
'dataType': 'json',
'paramType': 'body'
}
DEVICE_POWER_LIMIT_SET_RESPONSE = {
'code': 201,
'message': 'Power limit set successfully'
}
DEVICE_SOFT_LIMIT_SET_RESPONSE = {
'code': 201,
'message': 'Soft limit set successfully'
}
DEVICE_POWER_LIMIT_SET_RESPONSE_FAILURE = {
'code': 406,
'message': 'Power limit could not be set successfully'
}
DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE = {
'code': 406,
'message': 'Soft limit could not be set successfully'
}
RULE_SET_RESPONSE = {
'code': 201,
'message': 'Rule set successfully'
}
RULES_DELETED_RESPONSE = {
'code': 200,
'message': 'Rules deleted successfully'
}
RULE_WITHDRAWN_RESPONSE = {
'code': 201,
'message': 'Rule withdrawn successfully'
}
DEVICE_NOT_FOUND_RESPONSE = {
'code': 404,
'message': 'Device could not be found'
}
POWER_LIMIT_DELETED_FROM_DB_BUT_NOT_FROM_DEVICE = {
'code': 406,
'message': 'Power limit was deleted from database but could not be deleted from device'
}
ALL_DEVICES_GET_OK_RESPONSE = {
'code': 200,
'message': 'Ok'
}
STATISTICS_INTERVAL_SET_RESPONSE = {
'code': 201,
'message': 'Statistics gathering interval set successfully'
}
STATISTICS_DATA_UPDATED_RESPONSE = {
'code': 201,
'message': 'Statistics data updated successfully'
}
DEVICE_NOT_FOUND_AND_COMPUTATION_NODE_FETCHED_RESPONSES = [
DEVICE_NOT_FOUND_RESPONSE,
COMPUTATION_NODE_FETCHED_RESPONSE
]
AVAILABLE_RULE_TYPES = [
'TimeBased',
'HardLimit',
'Withdrawable'
]
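
# --- Illustrative usage (editor's sketch, not part of the original row).
# The dicts above follow the Swagger 1.2 parameter layout consumed by
# libraries such as flask-restful-swagger; even without that dependency,
# a small helper can enforce the 'required' flags against an incoming
# query mapping.


def missing_required_params(params, query):
    """Return names of required parameters absent from `query`."""
    return [p['name'] for p in params
            if p['required'] and p['name'] not in query]


# e.g. registering a computation node needs both address and port:
# missing_required_params(
#     [COMPUTATION_NODE_PARAM_ADDRESS, COMPUTATION_NODE_PARAM_PORT],
#     {'address': '10.0.0.5'})  # -> ['port']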
|
[
"COMPUTATION_NODE_PARAM_NAME = {\n 'name': 'name',\n 'description': 'Computation Node name',\n 'required': True,\n 'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'path'\n}\n\nCOMPUTATION_NODE_PARAM_ADDRESS = {\n 'name': 'address',\n 'description': 'Computation Node address',\n 'required': True,\n 'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'query'\n}\n\nCOMPUTATION_NODE_PARAM_PORT = {\n 'name': 'port',\n 'description': 'Computation Node port',\n 'required': True,\n 'allowMultiple': False,\n 'dataType': 'int',\n 'paramType': 'query'\n}\n\nCOMPUTATION_NODE_NOT_FOUND_RESPONSE = {\n 'code': 404,\n 'message': 'Computation node could not be found'\n}\n\nCOMPUTATION_NODE_PUT_NOT_FOUND_RESPONSE = {\n 'code': 406,\n 'message': 'Computation node could not be found'\n}\n\nCOMPUTATION_NODE_FETCHED_RESPONSE = {\n 'code': 200,\n 'message': 'Node info fetched successfully'\n}\n\nCONSTRAINTS_FETCHED_RESPONSE = {\n 'code': 200,\n 'message': 'Constraints fetched successfully'\n}\n\nPOWER_USAGE_FETCHED_RESPONSE = {\n 'code': 200,\n 'message': 'Power usage fetched successfully'\n}\n\nCOMPUTATION_NODE_ADDED_RESPONSE = {\n 'code': 201,\n 'message': 'Node added successfully'\n}\n\nALL_COMPUTATION_NODES_SORT_PARAM = {\n 'name': 'sort',\n 'description': 'Records sorting order',\n 'required': False,\n 'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'query'\n}\n\nALL_COMPUTATION_NODES_FILTER_PARAM = {\n 'name': 'filter',\n 'description': 'Node name filter',\n 'required': False,\n 'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'query'\n}\n\nALL_COMPUTATION_NODES_PAGINATION_PARAM = {\n 'name': 'pagination',\n 'description': 'Database pagination',\n 'required': False,\n 'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'query'\n}\n\nALL_COMPUTATION_NODES_NODE_ADDRESS_PARAM = {\n 'name': 'address',\n 'description': 'Address (domain, ip) of computation node',\n 'required': False,\n 'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'query'\n}\n\n\nDEVICE_IDENTIFIER_PARAM = {\n 'name': 'device_id',\n 'description': 'Device identifier',\n 'required': True,\n 'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'path'\n}\n\nNODE_AND_DEVICE_PARAMS = [DEVICE_IDENTIFIER_PARAM, COMPUTATION_NODE_PARAM_NAME]\n\nDEVICE_POWER_LIMIT_PARAM = {\n 'name': 'power_limit',\n 'description': 'Power limit',\n 'required': True,\n 'allowMultiple': False,\n 'dataType': 'int',\n 'paramType': 'query'\n}\n\nDEVICE_SOFT_LIMIT_PARAM = {\n 'name': 'soft_limit',\n 'description': 'Soft limit',\n 'required': True,\n 'allowMultiple': False,\n 'dataType': 'int',\n 'paramType': 'query'\n}\n\nSTATISTICS_INTERVAL_PARAM = {\n 'name': 'statistics_interval',\n 'description': 'Statistics gathering interval in minutes',\n 'required': True,\n 'allowMultiple': False,\n 'dataType': 'int',\n 'paramType': 'query'\n}\n\nDATETIME_PARAM = {\n 'name': 'date_time',\n 'description': 'Date and time',\n 'required': True,\n 'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'path'\n}\n\nDATETIME_PARAM_BEGIN = {\n 'name': 'date_time_begin',\n 'description': 'Date and time',\n 'required': False,\n 'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'query'\n}\n\nDATETIME_PARAM_END = {\n 'name': 'date_time_end',\n 'description': 'Date and time',\n 'required': False,\n 'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'query'\n}\n\nPOWER_USAGE_PARAM = {\n 'name': 'power_usage',\n 'description': 'Power usage',\n 'required': True,\n 
'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'query'\n}\n\nRULE_TYPE_PARAM = {\n 'name': 'rule_type',\n 'description': 'Rule type',\n 'required': True,\n 'allowMultiple': False,\n 'dataType': 'string',\n 'paramType': 'query'\n}\n\nRULE_PARAMS_PARAM = {\n 'name': 'rule_params',\n 'description': 'Rule parameters',\n 'required': True,\n 'allowMultiple': False,\n 'dataType': 'json',\n 'paramType': 'body'\n}\n\nDEVICE_POWER_LIMIT_SET_RESPONSE = {\n 'code': 201,\n 'message': 'Power limit set successfully'\n}\n\nDEVICE_SOFT_LIMIT_SET_RESPONSE = {\n 'code': 201,\n 'message': 'Soft limit set successfully'\n}\n\nDEVICE_POWER_LIMIT_SET_RESPONSE_FAILURE = {\n 'code': 406,\n 'message': 'Power limit could not be set successfully'\n}\n\nDEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE = {\n 'code': 406,\n 'message': 'Soft limit could not be set successfully'\n}\n\nRULE_SET_RESPONSE = {\n 'code': 201,\n 'message': 'Rule set successfully'\n}\n\nRULES_DELETED_RESPONSE = {\n 'code': 200,\n 'message': 'Rules deleted successfully'\n}\n\nRULE_WITHDRAWN_RESPONSE = {\n 'code': 201,\n 'message': 'Rule withdrawn successfully'\n}\n\nDEVICE_NOT_FOUND_RESPONSE = {\n 'code': 404,\n 'message': 'Device could not be found'\n}\n\nPOWER_LIMIT_DELETED_FROM_DB_BUT_NOT_FROM_DEVICE = {\n 'code': 406,\n 'message': 'Power limit was deleted from database but could not be deleted from device'\n}\n\nALL_DEVICES_GET_OK_RESPONSE = {\n 'code': 200,\n 'message': 'Ok'\n}\n\nSTATISTICS_INTERVAL_SET_RESPONSE = {\n 'code': 201,\n 'message': 'Statistics gathering interval set successfully'\n}\n\nSTATISTICS_DATA_UPDATED_RESPONSE = {\n 'code': 201,\n 'message': 'Statistics data updated successfully'\n}\n\nDEVICE_NOT_FOUND_AND_COMPUTATION_NODE_FETCHED_RESPONSES = [\n DEVICE_NOT_FOUND_RESPONSE,\n COMPUTATION_NODE_FETCHED_RESPONSE\n]\n\nAVAILABLE_RULE_TYPES = [\n 'TimeBased',\n 'HardLimit',\n 'Withdrawable'\n]\n",
"COMPUTATION_NODE_PARAM_NAME = {'name': 'name', 'description':\n 'Computation Node name', 'required': True, 'allowMultiple': False,\n 'dataType': 'string', 'paramType': 'path'}\nCOMPUTATION_NODE_PARAM_ADDRESS = {'name': 'address', 'description':\n 'Computation Node address', 'required': True, 'allowMultiple': False,\n 'dataType': 'string', 'paramType': 'query'}\nCOMPUTATION_NODE_PARAM_PORT = {'name': 'port', 'description':\n 'Computation Node port', 'required': True, 'allowMultiple': False,\n 'dataType': 'int', 'paramType': 'query'}\nCOMPUTATION_NODE_NOT_FOUND_RESPONSE = {'code': 404, 'message':\n 'Computation node could not be found'}\nCOMPUTATION_NODE_PUT_NOT_FOUND_RESPONSE = {'code': 406, 'message':\n 'Computation node could not be found'}\nCOMPUTATION_NODE_FETCHED_RESPONSE = {'code': 200, 'message':\n 'Node info fetched successfully'}\nCONSTRAINTS_FETCHED_RESPONSE = {'code': 200, 'message':\n 'Constraints fetched successfully'}\nPOWER_USAGE_FETCHED_RESPONSE = {'code': 200, 'message':\n 'Power usage fetched successfully'}\nCOMPUTATION_NODE_ADDED_RESPONSE = {'code': 201, 'message':\n 'Node added successfully'}\nALL_COMPUTATION_NODES_SORT_PARAM = {'name': 'sort', 'description':\n 'Records sorting order', 'required': False, 'allowMultiple': False,\n 'dataType': 'string', 'paramType': 'query'}\nALL_COMPUTATION_NODES_FILTER_PARAM = {'name': 'filter', 'description':\n 'Node name filter', 'required': False, 'allowMultiple': False,\n 'dataType': 'string', 'paramType': 'query'}\nALL_COMPUTATION_NODES_PAGINATION_PARAM = {'name': 'pagination',\n 'description': 'Database pagination', 'required': False,\n 'allowMultiple': False, 'dataType': 'string', 'paramType': 'query'}\nALL_COMPUTATION_NODES_NODE_ADDRESS_PARAM = {'name': 'address',\n 'description': 'Address (domain, ip) of computation node', 'required': \n False, 'allowMultiple': False, 'dataType': 'string', 'paramType': 'query'}\nDEVICE_IDENTIFIER_PARAM = {'name': 'device_id', 'description':\n 'Device identifier', 'required': True, 'allowMultiple': False,\n 'dataType': 'string', 'paramType': 'path'}\nNODE_AND_DEVICE_PARAMS = [DEVICE_IDENTIFIER_PARAM, COMPUTATION_NODE_PARAM_NAME]\nDEVICE_POWER_LIMIT_PARAM = {'name': 'power_limit', 'description':\n 'Power limit', 'required': True, 'allowMultiple': False, 'dataType':\n 'int', 'paramType': 'query'}\nDEVICE_SOFT_LIMIT_PARAM = {'name': 'soft_limit', 'description':\n 'Soft limit', 'required': True, 'allowMultiple': False, 'dataType':\n 'int', 'paramType': 'query'}\nSTATISTICS_INTERVAL_PARAM = {'name': 'statistics_interval', 'description':\n 'Statistics gathering interval in minutes', 'required': True,\n 'allowMultiple': False, 'dataType': 'int', 'paramType': 'query'}\nDATETIME_PARAM = {'name': 'date_time', 'description': 'Date and time',\n 'required': True, 'allowMultiple': False, 'dataType': 'string',\n 'paramType': 'path'}\nDATETIME_PARAM_BEGIN = {'name': 'date_time_begin', 'description':\n 'Date and time', 'required': False, 'allowMultiple': False, 'dataType':\n 'string', 'paramType': 'query'}\nDATETIME_PARAM_END = {'name': 'date_time_end', 'description':\n 'Date and time', 'required': False, 'allowMultiple': False, 'dataType':\n 'string', 'paramType': 'query'}\nPOWER_USAGE_PARAM = {'name': 'power_usage', 'description': 'Power usage',\n 'required': True, 'allowMultiple': False, 'dataType': 'string',\n 'paramType': 'query'}\nRULE_TYPE_PARAM = {'name': 'rule_type', 'description': 'Rule type',\n 'required': True, 'allowMultiple': False, 'dataType': 'string',\n 'paramType': 'query'}\nRULE_PARAMS_PARAM = 
{'name': 'rule_params', 'description':\n 'Rule parameters', 'required': True, 'allowMultiple': False, 'dataType':\n 'json', 'paramType': 'body'}\nDEVICE_POWER_LIMIT_SET_RESPONSE = {'code': 201, 'message':\n 'Power limit set successfully'}\nDEVICE_SOFT_LIMIT_SET_RESPONSE = {'code': 201, 'message':\n 'Soft limit set successfully'}\nDEVICE_POWER_LIMIT_SET_RESPONSE_FAILURE = {'code': 406, 'message':\n 'Power limit could not be set successfully'}\nDEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE = {'code': 406, 'message':\n 'Soft limit could not be set successfully'}\nRULE_SET_RESPONSE = {'code': 201, 'message': 'Rule set successfully'}\nRULES_DELETED_RESPONSE = {'code': 200, 'message': 'Rules deleted successfully'}\nRULE_WITHDRAWN_RESPONSE = {'code': 201, 'message':\n 'Rule withdrawn successfully'}\nDEVICE_NOT_FOUND_RESPONSE = {'code': 404, 'message':\n 'Device could not be found'}\nPOWER_LIMIT_DELETED_FROM_DB_BUT_NOT_FROM_DEVICE = {'code': 406, 'message':\n 'Power limit was deleted from database but could not be deleted from device'\n }\nALL_DEVICES_GET_OK_RESPONSE = {'code': 200, 'message': 'Ok'}\nSTATISTICS_INTERVAL_SET_RESPONSE = {'code': 201, 'message':\n 'Statistics gathering interval set successfully'}\nSTATISTICS_DATA_UPDATED_RESPONSE = {'code': 201, 'message':\n 'Statistics data updated successfully'}\nDEVICE_NOT_FOUND_AND_COMPUTATION_NODE_FETCHED_RESPONSES = [\n DEVICE_NOT_FOUND_RESPONSE, COMPUTATION_NODE_FETCHED_RESPONSE]\nAVAILABLE_RULE_TYPES = ['TimeBased', 'HardLimit', 'Withdrawable']\n",
"<assignment token>\n"
] | false |
98,457 |
6aa0f40cd523b7b745eb6a9b58dad4df8a75b7c1
|
##########################################################################
#
# Copyright (c) 2007-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os.path

import IECore
import IECoreScene

import imath

class TestCamera( unittest.TestCase ) :

	def assertBox2fEqual( self, box, x1, y1, x2, y2 ):
		self.assertAlmostEqual( box.min().x, x1 )
		self.assertAlmostEqual( box.min().y, y1 )
		self.assertAlmostEqual( box.max().x, x2 )
		self.assertAlmostEqual( box.max().y, y2 )
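
	# Round-trips a Camera through copy() and .cob file serialisation, both empty and with a parameter set.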
	def test( self ) :

		c = IECoreScene.Camera()
		self.assertEqual( c.parameters(), IECore.CompoundData() )

		cc = c.copy()
		self.assertEqual( cc.parameters(), IECore.CompoundData() )
		self.assertEqual( cc, c )

		IECore.Writer.create( cc, os.path.join( "test", "IECore", "data", "camera.cob" ) ).write()
		ccc = IECore.Reader.create( os.path.join( "test", "IECore", "data", "camera.cob" ) ).read()

		self.assertEqual( c, ccc )

		c.setFocalLength( 5 )
		self.assertEqual( c.getFocalLength(), 5 )

		# test copying and saving with some parameters
		cc = c.copy()
		self.assertEqual( cc, c )

		IECore.Writer.create( cc, os.path.join( "test", "IECore", "data", "camera.cob" ) ).write()
		ccc = IECore.Reader.create( os.path.join( "test", "IECore", "data", "camera.cob" ) ).read()
		self.assertEqual( ccc, c )

	def testCameraParameters( self ) :

		c = IECoreScene.Camera()

		# Defaults
		self.assertEqual( c.getProjection(), "orthographic" )
		self.assertEqual( c.getAperture(), imath.V2f( 2, 2 ) )
		self.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )
		self.assertEqual( c.getFocalLength(), 1 )
		self.assertEqual( c.getClippingPlanes(), imath.V2f( 0.01, 100000 ) )
		self.assertEqual( c.getFStop(), 0 )
		self.assertAlmostEqual( c.getFocalLengthWorldScale(), 0.1 )
		self.assertEqual( c.getFocusDistance(), 1 )

		# Set some values
		c.setProjection("perspective" )
		c.setAperture(imath.V2f( 36, 24 ) )
		c.setApertureOffset(imath.V2f( 1, -1 ) )
		c.setFocalLength( 35 )
		c.setClippingPlanes(imath.V2f( -10, 42 ) )
		c.setFStop( 3.0 )
		c.setFocalLengthWorldScale( 0.001 )
		c.setFocusDistance( 12.0 )

		# Test that we've got the new values
		self.assertEqual( c.getProjection(), "perspective" )
		self.assertEqual( c.getAperture(), imath.V2f( 36, 24 ) )
		self.assertEqual( c.getApertureOffset(), imath.V2f( 1, -1 ) )
		self.assertEqual( c.getFocalLength(), 35 )
		self.assertEqual( c.getClippingPlanes(), imath.V2f( -10, 42 ) )
		self.assertEqual( c.getFStop(), 3.0 )
		self.assertAlmostEqual( c.getFocalLengthWorldScale(), 0.001 )
		self.assertEqual( c.getFocusDistance(), 12.0 )
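
	# Older cameras stored "projection:fov" and "screenWindow" parameters; the aperture is derived from them.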
	def testDefaultApertureFromObseleteFovAndScreenWindow( self ) :

		c = IECoreScene.Camera()
		c.parameters()["resolution"] = imath.V2i( 100, 100 )
		c.parameters()["projection"] = "perspective"

		self.assertEqual( c.getAperture(), imath.V2f( 2, 2 ) )
		self.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )

		c.parameters()["projection:fov"] = 90.0

		self.assertEqual( c.getAperture(), imath.V2f( 2, 2 ) )
		self.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )

		c.parameters()["projection:fov"] = 60.0
		self.assertAlmostEqual( c.getAperture()[0], 1 / (3 ** 0.5) * 2, places = 6 )
		self.assertAlmostEqual( c.getAperture()[1], 1 / (3 ** 0.5) * 2, places = 6 )
		self.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )

		c.parameters()["projection:fov"] = 90.0
		c.setResolution( imath.V2i( 200, 100 ) )

		self.assertEqual( c.getAperture(), imath.V2f( 4, 2 ) )
		self.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )

		c.setPixelAspectRatio( 2 )
		self.assertEqual( c.getAperture(), imath.V2f( 8, 2 ) )
		self.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )

		c.parameters()["projection:fov"] = 90.0
		c.setResolution( imath.V2i( 100, 100 ) )
		c.setPixelAspectRatio( 1 )
		c.parameters()["screenWindow"] = imath.Box2f( imath.V2f( 10, -3 ), imath.V2f( 11, 5 ) )
		self.assertEqual( c.getAperture(), imath.V2f( 1, 8 ) )
		self.assertEqual( c.getApertureOffset(), imath.V2f( 10.5, 1 ) )

		c.parameters()["screenWindow"] = imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) )
		c.setFocalLength( 35 )

		self.assertEqual( c.getAperture(), imath.V2f( 70, 70 ) )
		self.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )
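
	# Render overrides are optional parameters; exercise the full has*/set*/get*/remove* round trip.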
	def testRenderOverrides( self ):
		c = IECoreScene.Camera()

		self.assertEqual( c.hasFilmFit(), False )
		self.assertEqual( c.hasResolution(), False )
		self.assertEqual( c.hasPixelAspectRatio(), False )
		self.assertEqual( c.hasResolutionMultiplier(), False )
		self.assertEqual( c.hasOverscan(), False )
		self.assertEqual( c.hasOverscanLeft(), False )
		self.assertEqual( c.hasOverscanRight(), False )
		self.assertEqual( c.hasOverscanTop(), False )
		self.assertEqual( c.hasOverscanBottom(), False )
		self.assertEqual( c.hasCropWindow(), False )
		self.assertEqual( c.hasShutter(), False )

		c.setFilmFit( IECoreScene.Camera.FilmFit.Vertical )
		c.setResolution( imath.V2i( 1280, 720 ) )
		c.setPixelAspectRatio( 2 )
		c.setResolutionMultiplier( 0.5 )
		c.setOverscan( True )
		c.setOverscanLeft( 0.2 )
		c.setOverscanRight( 0.1 )
		c.setOverscanTop( 0.3 )
		c.setOverscanBottom( 0.4 )
		c.setCropWindow( imath.Box2f( imath.V2f( 0.1, 0.2 ), imath.V2f( 0.8, 0.9 ) ) )
		c.setShutter( imath.V2f( -0.7, 0.3 ) )

		self.assertEqual( c.hasFilmFit(), True )
		self.assertEqual( c.hasResolution(), True )
		self.assertEqual( c.hasPixelAspectRatio(), True )
		self.assertEqual( c.hasResolutionMultiplier(), True )
		self.assertEqual( c.hasOverscan(), True )
		self.assertEqual( c.hasOverscanLeft(), True )
		self.assertEqual( c.hasOverscanRight(), True )
		self.assertEqual( c.hasOverscanTop(), True )
		self.assertEqual( c.hasOverscanBottom(), True )
		self.assertEqual( c.hasCropWindow(), True )
		self.assertEqual( c.hasShutter(), True )

		self.assertEqual( c.getFilmFit(), IECoreScene.Camera.FilmFit.Vertical )
		self.assertEqual( c.getResolution(), imath.V2i( 1280, 720 ) )
		self.assertEqual( c.getPixelAspectRatio(), 2 )
		self.assertEqual( c.getResolutionMultiplier(), 0.5 )
		self.assertEqual( c.getOverscan(), True )
		self.assertAlmostEqual( c.getOverscanLeft(), 0.2 )
		self.assertAlmostEqual( c.getOverscanRight(), 0.1 )
		self.assertAlmostEqual( c.getOverscanTop(), 0.3 )
		self.assertAlmostEqual( c.getOverscanBottom(), 0.4 )
		self.assertBox2fEqual( c.getCropWindow(), 0.1, 0.2, 0.8, 0.9 )
		self.assertAlmostEqual( c.getShutter(), imath.V2f( -0.7, 0.3 ) )

		c.removeFilmFit()
		c.removeResolution()
		c.removePixelAspectRatio()
		c.removeResolutionMultiplier()
		c.removeOverscan()
		c.removeOverscanLeft()
		c.removeOverscanRight()
		c.removeOverscanTop()
		c.removeOverscanBottom()
		c.removeCropWindow()
		c.removeShutter()

		self.assertEqual( c.hasFilmFit(), False )
		self.assertEqual( c.hasResolution(), False )
		self.assertEqual( c.hasPixelAspectRatio(), False )
		self.assertEqual( c.hasResolutionMultiplier(), False )
		self.assertEqual( c.hasOverscan(), False )
		self.assertEqual( c.hasOverscanLeft(), False )
		self.assertEqual( c.hasOverscanRight(), False )
		self.assertEqual( c.hasOverscanTop(), False )
		self.assertEqual( c.hasOverscanBottom(), False )
		self.assertEqual( c.hasCropWindow(), False )
		self.assertEqual( c.hasShutter(), False )
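
	# The camera hash must change whenever a parameter changes.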
	def testHash( self ) :
		c = IECoreScene.Camera()
		h = c.hash()

		c.setFocalLength( 12 )
		self.assertNotEqual( c.hash(), h )
		h = c.hash()
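
	# frustum() returns the screen window implied by the projection, aperture, focal length and resolution.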
	def testNormalizedScreenWindow( self ):
		c = IECoreScene.Camera()
		self.assertBox2fEqual( c.frustum(), -1, -0.75, 1, 0.75 )
		c.setFocalLength( 2 )
		self.assertBox2fEqual( c.frustum(), -1, -0.75, 1, 0.75 )
		c.setProjection("perspective" )
		self.assertBox2fEqual( c.frustum(), -0.5, -0.375, 0.5, 0.375 )
		c.setAperture(imath.V2f( 4, 4 ) )
		self.assertBox2fEqual( c.frustum(), -1, -0.75, 1, 0.75 )
		c.setApertureOffset(imath.V2f( 1, 1 ) )
		self.assertBox2fEqual( c.frustum(), -0.5, -0.25, 1.5, 1.25 )
		c.setFocalLength( 1 )
		self.assertBox2fEqual( c.frustum(), -1, -0.5, 3, 2.5 )
		c.setResolution(imath.V2i( 100, 100 ) )
		self.assertBox2fEqual( c.frustum(), -1, -1, 3, 3 )
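
	# renderResolution() and renderRegion() take the overscan and crop window overrides into account.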
	def testRenderImageSpec( self ):
		def B( x1, y1, x2, y2 ):
			return imath.Box2i( imath.V2i( x1, y1 ), imath.V2i( x2, y2 ) )

		c = IECoreScene.Camera()
		self.assertEqual( c.renderResolution(), imath.V2i( 640, 480 ) )
		self.assertEqual( c.renderRegion(), B( 0, 0, 640, 480 ) )
		c.setResolution( imath.V2i( 1920, 1080 ) )
		self.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )
		self.assertEqual( c.renderRegion(), B( 0, 0, 1920, 1080 ) )
		c.setOverscanLeft( 0.1 )
		self.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )
		self.assertEqual( c.renderRegion(), B( 0, 0, 1920, 1080 ) )
		c.setOverscan( True )
		self.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )
		self.assertEqual( c.renderRegion(), B( -192, 0, 1920, 1080 ) )
		c.setOverscanRight( 1.0 )
		c.setOverscanTop( 0.5 )
		c.setOverscanBottom( 0.25 )
		self.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )
		self.assertEqual( c.renderRegion(), B( -192, -270, 3840, 1620 ) )
		c.setCropWindow( imath.Box2f( imath.V2f( 0, 0 ), imath.V2f( 1, 1 ) ) )
		self.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )
		self.assertEqual( c.renderRegion(), B( 0, 0, 1920, 1080 ) )
		c.setCropWindow( imath.Box2f( imath.V2f( 0.2, 0.3 ), imath.V2f( 0.8, 0.5 ) ) )
		self.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )
		self.assertEqual( c.renderRegion(), B( 384, 1080 - 540, 1536, 1080 - 324 ) )
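
	# fitWindow() fits a window to a target aspect ratio according to the given FilmFit mode.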
	def testFitWindow( self ):
		def B( x1, y1, x2, y2 ):
			return imath.Box2f( imath.V2f( x1, y1 ), imath.V2f( x2, y2 ) )

		FitMode = IECoreScene.Camera.FilmFit
		cc = IECoreScene.Camera
		self.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Horizontal, 1.0 ), -1, -1, 1, 1 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Vertical, 1.0 ), -1, -1, 1, 1 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Fit, 1.0 ), -1, -1, 1, 1 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Fill, 1.0 ), -1, -1, 1, 1 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Distort, 1.0 ), -1, -1, 1, 1 )

		self.assertBox2fEqual( cc.fitWindow( B(-2, -1, 2, 1), FitMode.Horizontal, 1.0 ), -2, -2, 2, 2 )
		self.assertBox2fEqual( cc.fitWindow( B(-2, -1, 2, 1), FitMode.Vertical, 1.0 ), -1, -1, 1, 1 )
		self.assertBox2fEqual( cc.fitWindow( B(-2, -1, 2, 1), FitMode.Fit, 1.0 ), -2, -2, 2, 2 )
		self.assertBox2fEqual( cc.fitWindow( B(-2, -1, 2, 1), FitMode.Fill, 1.0 ), -1, -1, 1, 1 )
		self.assertBox2fEqual( cc.fitWindow( B(-2, -1, 2, 1), FitMode.Distort, 1.0 ), -2, -1, 2, 1 )

		self.assertBox2fEqual( cc.fitWindow( B(-1, -2, 1, 2), FitMode.Horizontal, 1.0 ), -1, -1, 1, 1 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -2, 1, 2), FitMode.Vertical, 1.0 ), -2, -2, 2, 2 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -2, 1, 2), FitMode.Fit, 1.0 ), -2, -2, 2, 2 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -2, 1, 2), FitMode.Fill, 1.0 ), -1, -1, 1, 1 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -2, 1, 2), FitMode.Distort, 1.0 ), -1, -2, 1, 2 )

		self.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Horizontal, 0.5 ), -1, -2, 1, 2 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Vertical, 0.5 ), -0.5, -1, 0.5, 1 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Fit, 0.5 ), -1, -2, 1, 2 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Fill, 0.5 ), -0.5, -1, 0.5, 1 )
		self.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Distort, 0.5 ), -1, -1, 1, 1 )
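
	# Remove the temporary camera.cob written by the serialisation tests.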
	def tearDown( self ) :

		if os.path.isfile( os.path.join( "test", "IECore", "data", "camera.cob" ) ) :
			os.remove( os.path.join( "test", "IECore", "data", "camera.cob" ) )
if __name__ == "__main__":
unittest.main()
|
[
"##########################################################################\n#\n# Copyright (c) 2007-2011, Image Engine Design Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# * Neither the name of Image Engine Design nor the names of any\n# other contributors to this software may be used to endorse or\n# promote products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n##########################################################################\n\nimport unittest\nimport os.path\n\nimport IECore\nimport IECoreScene\n\nimport imath\n\nclass TestCamera( unittest.TestCase ) :\n\n\tdef assertBox2fEqual( self, box, x1, y1, x2, y2 ):\n\t\tself.assertAlmostEqual( box.min().x, x1 )\n\t\tself.assertAlmostEqual( box.min().y, y1 )\n\t\tself.assertAlmostEqual( box.max().x, x2 )\n\t\tself.assertAlmostEqual( box.max().y, y2 )\n\n\tdef test( self ) :\n\n\t\tc = IECoreScene.Camera()\n\t\tself.assertEqual( c.parameters(), IECore.CompoundData() )\n\n\t\tcc = c.copy()\n\t\tself.assertEqual( cc.parameters(), IECore.CompoundData() )\n\t\tself.assertEqual( cc, c )\n\n\t\tIECore.Writer.create( cc, os.path.join( \"test\", \"IECore\", \"data\", \"camera.cob\" ) ).write()\n\t\tccc = IECore.Reader.create( os.path.join( \"test\", \"IECore\", \"data\", \"camera.cob\" ) ).read()\n\n\t\tself.assertEqual( c, ccc )\n\n\t\tc.setFocalLength( 5 )\n\t\tself.assertEqual( c.getFocalLength(), 5 )\n\n\t\t# test copying and saving with some parameters\n\t\tcc = c.copy()\n\t\tself.assertEqual( cc, c )\n\n\t\tIECore.Writer.create( cc, os.path.join( \"test\", \"IECore\", \"data\", \"camera.cob\" ) ).write()\n\t\tccc = IECore.Reader.create( os.path.join( \"test\", \"IECore\", \"data\", \"camera.cob\" ) ).read()\n\t\tself.assertEqual( ccc, c )\n\n\tdef testCameraParameters( self ) :\n\n\t\tc = IECoreScene.Camera()\n\n\t\t# Defaults\n\t\tself.assertEqual( c.getProjection(), \"orthographic\" )\n\t\tself.assertEqual( c.getAperture(), imath.V2f( 2, 2 ) )\n\t\tself.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )\n\t\tself.assertEqual( c.getFocalLength(), 1 )\n\t\tself.assertEqual( c.getClippingPlanes(), imath.V2f( 0.01, 100000 ) )\n\t\tself.assertEqual( c.getFStop(), 0 )\n\t\tself.assertAlmostEqual( c.getFocalLengthWorldScale(), 0.1 
)\n\t\tself.assertEqual( c.getFocusDistance(), 1 )\n\n\t\t# Set some values\n\t\tc.setProjection(\"perspective\" )\n\t\tc.setAperture(imath.V2f( 36, 24 ) )\n\t\tc.setApertureOffset(imath.V2f( 1, -1 ) )\n\t\tc.setFocalLength( 35 )\n\t\tc.setClippingPlanes(imath.V2f( -10, 42 ) )\n\t\tc.setFStop( 3.0 )\n\t\tc.setFocalLengthWorldScale( 0.001 )\n\t\tc.setFocusDistance( 12.0 )\n\n\t\t# Test that we've got the new values\n\t\tself.assertEqual( c.getProjection(), \"perspective\" )\n\t\tself.assertEqual( c.getAperture(), imath.V2f( 36, 24 ) )\n\t\tself.assertEqual( c.getApertureOffset(), imath.V2f( 1, -1 ) )\n\t\tself.assertEqual( c.getFocalLength(), 35 )\n\t\tself.assertEqual( c.getClippingPlanes(), imath.V2f( -10, 42 ) )\n\t\tself.assertEqual( c.getFStop(), 3.0 )\n\t\tself.assertAlmostEqual( c.getFocalLengthWorldScale(), 0.001 )\n\t\tself.assertEqual( c.getFocusDistance(), 12.0 )\n\n\tdef testDefaultApertureFromObseleteFovAndScreenWindow( self ) :\n\n\t\tc = IECoreScene.Camera()\n\t\tc.parameters()[\"resolution\"] = imath.V2i( 100, 100 )\n\t\tc.parameters()[\"projection\"] = \"perspective\"\n\n\t\tself.assertEqual( c.getAperture(), imath.V2f( 2, 2 ) )\n\t\tself.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )\n\n\t\tc.parameters()[\"projection:fov\"] = 90.0\n\n\t\tself.assertEqual( c.getAperture(), imath.V2f( 2, 2 ) )\n\t\tself.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )\n\n\t\tc.parameters()[\"projection:fov\"] = 60.0\n\t\tself.assertAlmostEqual( c.getAperture()[0], 1 / (3 ** 0.5) * 2, places = 6 )\n\t\tself.assertAlmostEqual( c.getAperture()[1], 1 / (3 ** 0.5) * 2, places = 6 )\n\t\tself.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )\n\n\t\tc.parameters()[\"projection:fov\"] = 90.0\n\t\tc.setResolution( imath.V2i( 200, 100 ) )\n\n\t\tself.assertEqual( c.getAperture(), imath.V2f( 4, 2 ) )\n\t\tself.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )\n\n\t\tc.setPixelAspectRatio( 2 )\n\t\tself.assertEqual( c.getAperture(), imath.V2f( 8, 2 ) )\n\t\tself.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )\n\n\t\tc.parameters()[\"projection:fov\"] = 90.0\n\t\tc.setResolution( imath.V2i( 100, 100 ) )\n\t\tc.setPixelAspectRatio( 1 )\n\t\tc.parameters()[\"screenWindow\"] = imath.Box2f( imath.V2f( 10, -3 ), imath.V2f( 11, 5 ) )\n\t\tself.assertEqual( c.getAperture(), imath.V2f( 1, 8 ) )\n\t\tself.assertEqual( c.getApertureOffset(), imath.V2f( 10.5, 1 ) )\n\n\t\tc.parameters()[\"screenWindow\"] = imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) )\n\t\tc.setFocalLength( 35 )\n\n\t\tself.assertEqual( c.getAperture(), imath.V2f( 70, 70 ) )\n\t\tself.assertEqual( c.getApertureOffset(), imath.V2f( 0, 0 ) )\n\n\tdef testRenderOverrides( self ):\n\t\tc = IECoreScene.Camera()\n\n\t\tself.assertEqual( c.hasFilmFit(), False )\n\t\tself.assertEqual( c.hasResolution(), False )\n\t\tself.assertEqual( c.hasPixelAspectRatio(), False )\n\t\tself.assertEqual( c.hasResolutionMultiplier(), False )\n\t\tself.assertEqual( c.hasOverscan(), False )\n\t\tself.assertEqual( c.hasOverscanLeft(), False )\n\t\tself.assertEqual( c.hasOverscanRight(), False )\n\t\tself.assertEqual( c.hasOverscanTop(), False )\n\t\tself.assertEqual( c.hasOverscanBottom(), False )\n\t\tself.assertEqual( c.hasCropWindow(), False )\n\t\tself.assertEqual( c.hasShutter(), False )\n\n\t\tc.setFilmFit( IECoreScene.Camera.FilmFit.Vertical )\n\t\tc.setResolution( imath.V2i( 1280, 720 ) )\n\t\tc.setPixelAspectRatio( 2 )\n\t\tc.setResolutionMultiplier( 0.5 )\n\t\tc.setOverscan( True )\n\t\tc.setOverscanLeft( 0.2 
)\n\t\tc.setOverscanRight( 0.1 )\n\t\tc.setOverscanTop( 0.3 )\n\t\tc.setOverscanBottom( 0.4 )\n\t\tc.setCropWindow( imath.Box2f( imath.V2f( 0.1, 0.2 ), imath.V2f( 0.8, 0.9 ) ) )\n\t\tc.setShutter( imath.V2f( -0.7, 0.3 ) )\n\n\t\tself.assertEqual( c.hasFilmFit(), True )\n\t\tself.assertEqual( c.hasResolution(), True )\n\t\tself.assertEqual( c.hasPixelAspectRatio(), True )\n\t\tself.assertEqual( c.hasResolutionMultiplier(), True )\n\t\tself.assertEqual( c.hasOverscan(), True )\n\t\tself.assertEqual( c.hasOverscanLeft(), True )\n\t\tself.assertEqual( c.hasOverscanRight(), True )\n\t\tself.assertEqual( c.hasOverscanTop(), True )\n\t\tself.assertEqual( c.hasOverscanBottom(), True )\n\t\tself.assertEqual( c.hasCropWindow(), True )\n\t\tself.assertEqual( c.hasShutter(), True )\n\n\t\tself.assertEqual( c.getFilmFit(), IECoreScene.Camera.FilmFit.Vertical )\n\t\tself.assertEqual( c.getResolution(), imath.V2i( 1280, 720 ) )\n\t\tself.assertEqual( c.getPixelAspectRatio(), 2 )\n\t\tself.assertEqual( c.getResolutionMultiplier(), 0.5 )\n\t\tself.assertEqual( c.getOverscan(), True )\n\t\tself.assertAlmostEqual( c.getOverscanLeft(), 0.2 )\n\t\tself.assertAlmostEqual( c.getOverscanRight(), 0.1 )\n\t\tself.assertAlmostEqual( c.getOverscanTop(), 0.3 )\n\t\tself.assertAlmostEqual( c.getOverscanBottom(), 0.4 )\n\t\tself.assertBox2fEqual( c.getCropWindow(), 0.1, 0.2, 0.8, 0.9 )\n\t\tself.assertAlmostEqual( c.getShutter(), imath.V2f( -0.7, 0.3 ) )\n\n\t\tc.removeFilmFit()\n\t\tc.removeResolution()\n\t\tc.removePixelAspectRatio()\n\t\tc.removeResolutionMultiplier()\n\t\tc.removeOverscan()\n\t\tc.removeOverscanLeft()\n\t\tc.removeOverscanRight()\n\t\tc.removeOverscanTop()\n\t\tc.removeOverscanBottom()\n\t\tc.removeCropWindow()\n\t\tc.removeShutter()\n\n\t\tself.assertEqual( c.hasFilmFit(), False )\n\t\tself.assertEqual( c.hasResolution(), False )\n\t\tself.assertEqual( c.hasPixelAspectRatio(), False )\n\t\tself.assertEqual( c.hasResolutionMultiplier(), False )\n\t\tself.assertEqual( c.hasOverscan(), False )\n\t\tself.assertEqual( c.hasOverscanLeft(), False )\n\t\tself.assertEqual( c.hasOverscanRight(), False )\n\t\tself.assertEqual( c.hasOverscanTop(), False )\n\t\tself.assertEqual( c.hasOverscanBottom(), False )\n\t\tself.assertEqual( c.hasCropWindow(), False )\n\t\tself.assertEqual( c.hasShutter(), False )\n\n\tdef testHash( self ) :\n\t\tc = IECoreScene.Camera()\n\t\th = c.hash()\n\n\t\tc.setFocalLength( 12 )\n\t\tself.assertNotEqual( c.hash(), h )\n\t\th = c.hash()\n\n\tdef testNormalizedScreenWindow( self ):\n\t\tc = IECoreScene.Camera()\n\t\tself.assertBox2fEqual( c.frustum(), -1, -0.75, 1, 0.75 )\n\t\tc.setFocalLength( 2 )\n\t\tself.assertBox2fEqual( c.frustum(), -1, -0.75, 1, 0.75 )\n\t\tc.setProjection(\"perspective\" )\n\t\tself.assertBox2fEqual( c.frustum(), -0.5, -0.375, 0.5, 0.375 )\n\t\tc.setAperture(imath.V2f( 4, 4 ) )\n\t\tself.assertBox2fEqual( c.frustum(), -1, -0.75, 1, 0.75 )\n\t\tc.setApertureOffset(imath.V2f( 1, 1 ) )\n\t\tself.assertBox2fEqual( c.frustum(), -0.5, -0.25, 1.5, 1.25 )\n\t\tc.setFocalLength( 1 )\n\t\tself.assertBox2fEqual( c.frustum(), -1, -0.5, 3, 2.5 )\n\t\tc.setResolution(imath.V2i( 100, 100 ) )\n\t\tself.assertBox2fEqual( c.frustum(), -1, -1, 3, 3 )\n\n\tdef testRenderImageSpec( self ):\n\t\tdef B( x1, y1, x2, y2 ):\n\t\t\treturn imath.Box2i( imath.V2i( x1, y1 ), imath.V2i( x2, y2 ) )\n\n\t\tc = IECoreScene.Camera()\n\t\tself.assertEqual( c.renderResolution(), imath.V2i( 640, 480 ) )\n\t\tself.assertEqual( c.renderRegion(), B( 0, 0, 640, 480 ) )\n\t\tc.setResolution( 
imath.V2i( 1920, 1080 ) )\n\t\tself.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )\n\t\tself.assertEqual( c.renderRegion(), B( 0, 0, 1920, 1080 ) )\n\t\tc.setOverscanLeft( 0.1 )\n\t\tself.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )\n\t\tself.assertEqual( c.renderRegion(), B( 0, 0, 1920, 1080 ) )\n\t\tc.setOverscan( True )\n\t\tself.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )\n\t\tself.assertEqual( c.renderRegion(), B( -192, 0, 1920, 1080 ) )\n\t\tc.setOverscanRight( 1.0 )\n\t\tc.setOverscanTop( 0.5 )\n\t\tc.setOverscanBottom( 0.25 )\n\t\tself.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )\n\t\tself.assertEqual( c.renderRegion(), B( -192, -270, 3840, 1620 ) )\n\t\tc.setCropWindow( imath.Box2f( imath.V2f( 0, 0 ), imath.V2f( 1, 1 ) ) )\n\t\tself.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )\n\t\tself.assertEqual( c.renderRegion(), B( 0, 0, 1920, 1080 ) )\n\t\tc.setCropWindow( imath.Box2f( imath.V2f( 0.2, 0.3 ), imath.V2f( 0.8, 0.5 ) ) )\n\t\tself.assertEqual( c.renderResolution(), imath.V2i( 1920, 1080 ) )\n\t\tself.assertEqual( c.renderRegion(), B( 384, 1080 - 540, 1536, 1080 - 324 ) )\n\n\tdef testFitWindow( self ):\n\t\tdef B( x1, y1, x2, y2 ):\n\t\t\treturn imath.Box2f( imath.V2f( x1, y1 ), imath.V2f( x2, y2 ) )\n\n\t\tFitMode = IECoreScene.Camera.FilmFit\n\t\tcc = IECoreScene.Camera\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Horizontal, 1.0 ), -1, -1, 1, 1 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Vertical, 1.0 ), -1, -1, 1, 1 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Fit, 1.0 ), -1, -1, 1, 1 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Fill, 1.0 ), -1, -1, 1, 1 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Distort, 1.0 ), -1, -1, 1, 1 )\n\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-2, -1, 2, 1), FitMode.Horizontal, 1.0 ), -2, -2, 2, 2 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-2, -1, 2, 1), FitMode.Vertical, 1.0 ), -1, -1, 1, 1 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-2, -1, 2, 1), FitMode.Fit, 1.0 ), -2, -2, 2, 2 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-2, -1, 2, 1), FitMode.Fill, 1.0 ), -1, -1, 1, 1 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-2, -1, 2, 1), FitMode.Distort, 1.0 ), -2, -1, 2, 1 )\n\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -2, 1, 2), FitMode.Horizontal, 1.0 ), -1, -1, 1, 1 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -2, 1, 2), FitMode.Vertical, 1.0 ), -2, -2, 2, 2 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -2, 1, 2), FitMode.Fit, 1.0 ), -2, -2, 2, 2 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -2, 1, 2), FitMode.Fill, 1.0 ), -1, -1, 1, 1 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -2, 1, 2), FitMode.Distort, 1.0 ), -1, -2, 1, 2 )\n\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Horizontal, 0.5 ), -1, -2, 1, 2 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Vertical, 0.5 ), -0.5, -1, 0.5, 1 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Fit, 0.5 ), -1, -2, 1, 2 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Fill, 0.5 ), -0.5, -1, 0.5, 1 )\n\t\tself.assertBox2fEqual( cc.fitWindow( B(-1, -1, 1, 1), FitMode.Distort, 0.5 ), -1, -1, 1, 1 )\n\n\n\tdef tearDown( self ) :\n\n\t\tif os.path.isfile( os.path.join( \"test\", \"IECore\", \"data\", \"camera.cob\" ) ) :\n\t\t\tos.remove( os.path.join( \"test\", \"IECore\", \"data\", \"camera.cob\" ) 
)\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import unittest\nimport os.path\nimport IECore\nimport IECoreScene\nimport imath\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n\n def test(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.parameters(), IECore.CompoundData())\n cc = c.copy()\n self.assertEqual(cc.parameters(), IECore.CompoundData())\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(c, ccc)\n c.setFocalLength(5)\n self.assertEqual(c.getFocalLength(), 5)\n cc = c.copy()\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(ccc, c)\n\n def testCameraParameters(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.getProjection(), 'orthographic')\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n self.assertEqual(c.getFocalLength(), 1)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(0.01, 100000))\n self.assertEqual(c.getFStop(), 0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.1)\n self.assertEqual(c.getFocusDistance(), 1)\n c.setProjection('perspective')\n c.setAperture(imath.V2f(36, 24))\n c.setApertureOffset(imath.V2f(1, -1))\n c.setFocalLength(35)\n c.setClippingPlanes(imath.V2f(-10, 42))\n c.setFStop(3.0)\n c.setFocalLengthWorldScale(0.001)\n c.setFocusDistance(12.0)\n self.assertEqual(c.getProjection(), 'perspective')\n self.assertEqual(c.getAperture(), imath.V2f(36, 24))\n self.assertEqual(c.getApertureOffset(), imath.V2f(1, -1))\n self.assertEqual(c.getFocalLength(), 35)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(-10, 42))\n self.assertEqual(c.getFStop(), 3.0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.001)\n self.assertEqual(c.getFocusDistance(), 12.0)\n\n def testDefaultApertureFromObseleteFovAndScreenWindow(self):\n c = IECoreScene.Camera()\n c.parameters()['resolution'] = imath.V2i(100, 100)\n c.parameters()['projection'] = 'perspective'\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 90.0\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 60.0\n self.assertAlmostEqual(c.getAperture()[0], 1 / 3 ** 0.5 * 2, places=6)\n self.assertAlmostEqual(c.getAperture()[1], 1 / 3 ** 0.5 * 2, places=6)\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 90.0\n c.setResolution(imath.V2i(200, 100))\n self.assertEqual(c.getAperture(), imath.V2f(4, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.setPixelAspectRatio(2)\n self.assertEqual(c.getAperture(), imath.V2f(8, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 90.0\n c.setResolution(imath.V2i(100, 100))\n c.setPixelAspectRatio(1)\n c.parameters()['screenWindow'] = imath.Box2f(imath.V2f(10, -3),\n imath.V2f(11, 5))\n self.assertEqual(c.getAperture(), imath.V2f(1, 8))\n self.assertEqual(c.getApertureOffset(), 
imath.V2f(10.5, 1))\n c.parameters()['screenWindow'] = imath.Box2f(imath.V2f(-1), imath.\n V2f(1))\n c.setFocalLength(35)\n self.assertEqual(c.getAperture(), imath.V2f(70, 70))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n\n def testRenderOverrides(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.hasFilmFit(), False)\n self.assertEqual(c.hasResolution(), False)\n self.assertEqual(c.hasPixelAspectRatio(), False)\n self.assertEqual(c.hasResolutionMultiplier(), False)\n self.assertEqual(c.hasOverscan(), False)\n self.assertEqual(c.hasOverscanLeft(), False)\n self.assertEqual(c.hasOverscanRight(), False)\n self.assertEqual(c.hasOverscanTop(), False)\n self.assertEqual(c.hasOverscanBottom(), False)\n self.assertEqual(c.hasCropWindow(), False)\n self.assertEqual(c.hasShutter(), False)\n c.setFilmFit(IECoreScene.Camera.FilmFit.Vertical)\n c.setResolution(imath.V2i(1280, 720))\n c.setPixelAspectRatio(2)\n c.setResolutionMultiplier(0.5)\n c.setOverscan(True)\n c.setOverscanLeft(0.2)\n c.setOverscanRight(0.1)\n c.setOverscanTop(0.3)\n c.setOverscanBottom(0.4)\n c.setCropWindow(imath.Box2f(imath.V2f(0.1, 0.2), imath.V2f(0.8, 0.9)))\n c.setShutter(imath.V2f(-0.7, 0.3))\n self.assertEqual(c.hasFilmFit(), True)\n self.assertEqual(c.hasResolution(), True)\n self.assertEqual(c.hasPixelAspectRatio(), True)\n self.assertEqual(c.hasResolutionMultiplier(), True)\n self.assertEqual(c.hasOverscan(), True)\n self.assertEqual(c.hasOverscanLeft(), True)\n self.assertEqual(c.hasOverscanRight(), True)\n self.assertEqual(c.hasOverscanTop(), True)\n self.assertEqual(c.hasOverscanBottom(), True)\n self.assertEqual(c.hasCropWindow(), True)\n self.assertEqual(c.hasShutter(), True)\n self.assertEqual(c.getFilmFit(), IECoreScene.Camera.FilmFit.Vertical)\n self.assertEqual(c.getResolution(), imath.V2i(1280, 720))\n self.assertEqual(c.getPixelAspectRatio(), 2)\n self.assertEqual(c.getResolutionMultiplier(), 0.5)\n self.assertEqual(c.getOverscan(), True)\n self.assertAlmostEqual(c.getOverscanLeft(), 0.2)\n self.assertAlmostEqual(c.getOverscanRight(), 0.1)\n self.assertAlmostEqual(c.getOverscanTop(), 0.3)\n self.assertAlmostEqual(c.getOverscanBottom(), 0.4)\n self.assertBox2fEqual(c.getCropWindow(), 0.1, 0.2, 0.8, 0.9)\n self.assertAlmostEqual(c.getShutter(), imath.V2f(-0.7, 0.3))\n c.removeFilmFit()\n c.removeResolution()\n c.removePixelAspectRatio()\n c.removeResolutionMultiplier()\n c.removeOverscan()\n c.removeOverscanLeft()\n c.removeOverscanRight()\n c.removeOverscanTop()\n c.removeOverscanBottom()\n c.removeCropWindow()\n c.removeShutter()\n self.assertEqual(c.hasFilmFit(), False)\n self.assertEqual(c.hasResolution(), False)\n self.assertEqual(c.hasPixelAspectRatio(), False)\n self.assertEqual(c.hasResolutionMultiplier(), False)\n self.assertEqual(c.hasOverscan(), False)\n self.assertEqual(c.hasOverscanLeft(), False)\n self.assertEqual(c.hasOverscanRight(), False)\n self.assertEqual(c.hasOverscanTop(), False)\n self.assertEqual(c.hasOverscanBottom(), False)\n self.assertEqual(c.hasCropWindow(), False)\n self.assertEqual(c.hasShutter(), False)\n\n def testHash(self):\n c = IECoreScene.Camera()\n h = c.hash()\n c.setFocalLength(12)\n self.assertNotEqual(c.hash(), h)\n h = c.hash()\n\n def testNormalizedScreenWindow(self):\n c = IECoreScene.Camera()\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setFocalLength(2)\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setProjection('perspective')\n self.assertBox2fEqual(c.frustum(), -0.5, -0.375, 0.5, 0.375)\n 
c.setAperture(imath.V2f(4, 4))\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setApertureOffset(imath.V2f(1, 1))\n self.assertBox2fEqual(c.frustum(), -0.5, -0.25, 1.5, 1.25)\n c.setFocalLength(1)\n self.assertBox2fEqual(c.frustum(), -1, -0.5, 3, 2.5)\n c.setResolution(imath.V2i(100, 100))\n self.assertBox2fEqual(c.frustum(), -1, -1, 3, 3)\n\n def testRenderImageSpec(self):\n\n def B(x1, y1, x2, y2):\n return imath.Box2i(imath.V2i(x1, y1), imath.V2i(x2, y2))\n c = IECoreScene.Camera()\n self.assertEqual(c.renderResolution(), imath.V2i(640, 480))\n self.assertEqual(c.renderRegion(), B(0, 0, 640, 480))\n c.setResolution(imath.V2i(1920, 1080))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setOverscanLeft(0.1)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setOverscan(True)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(-192, 0, 1920, 1080))\n c.setOverscanRight(1.0)\n c.setOverscanTop(0.5)\n c.setOverscanBottom(0.25)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(-192, -270, 3840, 1620))\n c.setCropWindow(imath.Box2f(imath.V2f(0, 0), imath.V2f(1, 1)))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setCropWindow(imath.Box2f(imath.V2f(0.2, 0.3), imath.V2f(0.8, 0.5)))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(384, 1080 - 540, 1536, 1080 - 324)\n )\n\n def testFitWindow(self):\n\n def B(x1, y1, x2, y2):\n return imath.Box2f(imath.V2f(x1, y1), imath.V2f(x2, y2))\n FitMode = IECoreScene.Camera.FilmFit\n cc = IECoreScene.Camera\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Horizontal, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Distort,\n 1.0), -2, -1, 2, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Vertical, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Distort,\n 1.0), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 0.5), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 0.5), -0.5, -1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 0.5), -1, -2, 1, 2)\n 
self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 0.5), -0.5, -1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 0.5), -1, -1, 1, 1)\n\n def tearDown(self):\n if os.path.isfile(os.path.join('test', 'IECore', 'data', 'camera.cob')\n ):\n os.remove(os.path.join('test', 'IECore', 'data', 'camera.cob'))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n\n def test(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.parameters(), IECore.CompoundData())\n cc = c.copy()\n self.assertEqual(cc.parameters(), IECore.CompoundData())\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(c, ccc)\n c.setFocalLength(5)\n self.assertEqual(c.getFocalLength(), 5)\n cc = c.copy()\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(ccc, c)\n\n def testCameraParameters(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.getProjection(), 'orthographic')\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n self.assertEqual(c.getFocalLength(), 1)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(0.01, 100000))\n self.assertEqual(c.getFStop(), 0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.1)\n self.assertEqual(c.getFocusDistance(), 1)\n c.setProjection('perspective')\n c.setAperture(imath.V2f(36, 24))\n c.setApertureOffset(imath.V2f(1, -1))\n c.setFocalLength(35)\n c.setClippingPlanes(imath.V2f(-10, 42))\n c.setFStop(3.0)\n c.setFocalLengthWorldScale(0.001)\n c.setFocusDistance(12.0)\n self.assertEqual(c.getProjection(), 'perspective')\n self.assertEqual(c.getAperture(), imath.V2f(36, 24))\n self.assertEqual(c.getApertureOffset(), imath.V2f(1, -1))\n self.assertEqual(c.getFocalLength(), 35)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(-10, 42))\n self.assertEqual(c.getFStop(), 3.0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.001)\n self.assertEqual(c.getFocusDistance(), 12.0)\n\n def testDefaultApertureFromObseleteFovAndScreenWindow(self):\n c = IECoreScene.Camera()\n c.parameters()['resolution'] = imath.V2i(100, 100)\n c.parameters()['projection'] = 'perspective'\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 90.0\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 60.0\n self.assertAlmostEqual(c.getAperture()[0], 1 / 3 ** 0.5 * 2, places=6)\n self.assertAlmostEqual(c.getAperture()[1], 1 / 3 ** 0.5 * 2, places=6)\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 90.0\n c.setResolution(imath.V2i(200, 100))\n self.assertEqual(c.getAperture(), imath.V2f(4, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.setPixelAspectRatio(2)\n self.assertEqual(c.getAperture(), imath.V2f(8, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 90.0\n c.setResolution(imath.V2i(100, 100))\n c.setPixelAspectRatio(1)\n c.parameters()['screenWindow'] = imath.Box2f(imath.V2f(10, -3),\n imath.V2f(11, 5))\n self.assertEqual(c.getAperture(), imath.V2f(1, 8))\n self.assertEqual(c.getApertureOffset(), imath.V2f(10.5, 1))\n c.parameters()['screenWindow'] = 
imath.Box2f(imath.V2f(-1), imath.\n V2f(1))\n c.setFocalLength(35)\n self.assertEqual(c.getAperture(), imath.V2f(70, 70))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n\n def testRenderOverrides(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.hasFilmFit(), False)\n self.assertEqual(c.hasResolution(), False)\n self.assertEqual(c.hasPixelAspectRatio(), False)\n self.assertEqual(c.hasResolutionMultiplier(), False)\n self.assertEqual(c.hasOverscan(), False)\n self.assertEqual(c.hasOverscanLeft(), False)\n self.assertEqual(c.hasOverscanRight(), False)\n self.assertEqual(c.hasOverscanTop(), False)\n self.assertEqual(c.hasOverscanBottom(), False)\n self.assertEqual(c.hasCropWindow(), False)\n self.assertEqual(c.hasShutter(), False)\n c.setFilmFit(IECoreScene.Camera.FilmFit.Vertical)\n c.setResolution(imath.V2i(1280, 720))\n c.setPixelAspectRatio(2)\n c.setResolutionMultiplier(0.5)\n c.setOverscan(True)\n c.setOverscanLeft(0.2)\n c.setOverscanRight(0.1)\n c.setOverscanTop(0.3)\n c.setOverscanBottom(0.4)\n c.setCropWindow(imath.Box2f(imath.V2f(0.1, 0.2), imath.V2f(0.8, 0.9)))\n c.setShutter(imath.V2f(-0.7, 0.3))\n self.assertEqual(c.hasFilmFit(), True)\n self.assertEqual(c.hasResolution(), True)\n self.assertEqual(c.hasPixelAspectRatio(), True)\n self.assertEqual(c.hasResolutionMultiplier(), True)\n self.assertEqual(c.hasOverscan(), True)\n self.assertEqual(c.hasOverscanLeft(), True)\n self.assertEqual(c.hasOverscanRight(), True)\n self.assertEqual(c.hasOverscanTop(), True)\n self.assertEqual(c.hasOverscanBottom(), True)\n self.assertEqual(c.hasCropWindow(), True)\n self.assertEqual(c.hasShutter(), True)\n self.assertEqual(c.getFilmFit(), IECoreScene.Camera.FilmFit.Vertical)\n self.assertEqual(c.getResolution(), imath.V2i(1280, 720))\n self.assertEqual(c.getPixelAspectRatio(), 2)\n self.assertEqual(c.getResolutionMultiplier(), 0.5)\n self.assertEqual(c.getOverscan(), True)\n self.assertAlmostEqual(c.getOverscanLeft(), 0.2)\n self.assertAlmostEqual(c.getOverscanRight(), 0.1)\n self.assertAlmostEqual(c.getOverscanTop(), 0.3)\n self.assertAlmostEqual(c.getOverscanBottom(), 0.4)\n self.assertBox2fEqual(c.getCropWindow(), 0.1, 0.2, 0.8, 0.9)\n self.assertAlmostEqual(c.getShutter(), imath.V2f(-0.7, 0.3))\n c.removeFilmFit()\n c.removeResolution()\n c.removePixelAspectRatio()\n c.removeResolutionMultiplier()\n c.removeOverscan()\n c.removeOverscanLeft()\n c.removeOverscanRight()\n c.removeOverscanTop()\n c.removeOverscanBottom()\n c.removeCropWindow()\n c.removeShutter()\n self.assertEqual(c.hasFilmFit(), False)\n self.assertEqual(c.hasResolution(), False)\n self.assertEqual(c.hasPixelAspectRatio(), False)\n self.assertEqual(c.hasResolutionMultiplier(), False)\n self.assertEqual(c.hasOverscan(), False)\n self.assertEqual(c.hasOverscanLeft(), False)\n self.assertEqual(c.hasOverscanRight(), False)\n self.assertEqual(c.hasOverscanTop(), False)\n self.assertEqual(c.hasOverscanBottom(), False)\n self.assertEqual(c.hasCropWindow(), False)\n self.assertEqual(c.hasShutter(), False)\n\n def testHash(self):\n c = IECoreScene.Camera()\n h = c.hash()\n c.setFocalLength(12)\n self.assertNotEqual(c.hash(), h)\n h = c.hash()\n\n def testNormalizedScreenWindow(self):\n c = IECoreScene.Camera()\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setFocalLength(2)\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setProjection('perspective')\n self.assertBox2fEqual(c.frustum(), -0.5, -0.375, 0.5, 0.375)\n c.setAperture(imath.V2f(4, 4))\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 
1, 0.75)\n c.setApertureOffset(imath.V2f(1, 1))\n self.assertBox2fEqual(c.frustum(), -0.5, -0.25, 1.5, 1.25)\n c.setFocalLength(1)\n self.assertBox2fEqual(c.frustum(), -1, -0.5, 3, 2.5)\n c.setResolution(imath.V2i(100, 100))\n self.assertBox2fEqual(c.frustum(), -1, -1, 3, 3)\n\n def testRenderImageSpec(self):\n\n def B(x1, y1, x2, y2):\n return imath.Box2i(imath.V2i(x1, y1), imath.V2i(x2, y2))\n c = IECoreScene.Camera()\n self.assertEqual(c.renderResolution(), imath.V2i(640, 480))\n self.assertEqual(c.renderRegion(), B(0, 0, 640, 480))\n c.setResolution(imath.V2i(1920, 1080))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setOverscanLeft(0.1)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setOverscan(True)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(-192, 0, 1920, 1080))\n c.setOverscanRight(1.0)\n c.setOverscanTop(0.5)\n c.setOverscanBottom(0.25)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(-192, -270, 3840, 1620))\n c.setCropWindow(imath.Box2f(imath.V2f(0, 0), imath.V2f(1, 1)))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setCropWindow(imath.Box2f(imath.V2f(0.2, 0.3), imath.V2f(0.8, 0.5)))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(384, 1080 - 540, 1536, 1080 - 324)\n )\n\n def testFitWindow(self):\n\n def B(x1, y1, x2, y2):\n return imath.Box2f(imath.V2f(x1, y1), imath.V2f(x2, y2))\n FitMode = IECoreScene.Camera.FilmFit\n cc = IECoreScene.Camera\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Horizontal, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Distort,\n 1.0), -2, -1, 2, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Vertical, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Distort,\n 1.0), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 0.5), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 0.5), -0.5, -1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 0.5), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 0.5), -0.5, 
-1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 0.5), -1, -1, 1, 1)\n\n def tearDown(self):\n if os.path.isfile(os.path.join('test', 'IECore', 'data', 'camera.cob')\n ):\n os.remove(os.path.join('test', 'IECore', 'data', 'camera.cob'))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n\n def test(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.parameters(), IECore.CompoundData())\n cc = c.copy()\n self.assertEqual(cc.parameters(), IECore.CompoundData())\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(c, ccc)\n c.setFocalLength(5)\n self.assertEqual(c.getFocalLength(), 5)\n cc = c.copy()\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(ccc, c)\n\n def testCameraParameters(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.getProjection(), 'orthographic')\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n self.assertEqual(c.getFocalLength(), 1)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(0.01, 100000))\n self.assertEqual(c.getFStop(), 0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.1)\n self.assertEqual(c.getFocusDistance(), 1)\n c.setProjection('perspective')\n c.setAperture(imath.V2f(36, 24))\n c.setApertureOffset(imath.V2f(1, -1))\n c.setFocalLength(35)\n c.setClippingPlanes(imath.V2f(-10, 42))\n c.setFStop(3.0)\n c.setFocalLengthWorldScale(0.001)\n c.setFocusDistance(12.0)\n self.assertEqual(c.getProjection(), 'perspective')\n self.assertEqual(c.getAperture(), imath.V2f(36, 24))\n self.assertEqual(c.getApertureOffset(), imath.V2f(1, -1))\n self.assertEqual(c.getFocalLength(), 35)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(-10, 42))\n self.assertEqual(c.getFStop(), 3.0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.001)\n self.assertEqual(c.getFocusDistance(), 12.0)\n\n def testDefaultApertureFromObseleteFovAndScreenWindow(self):\n c = IECoreScene.Camera()\n c.parameters()['resolution'] = imath.V2i(100, 100)\n c.parameters()['projection'] = 'perspective'\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 90.0\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 60.0\n self.assertAlmostEqual(c.getAperture()[0], 1 / 3 ** 0.5 * 2, places=6)\n self.assertAlmostEqual(c.getAperture()[1], 1 / 3 ** 0.5 * 2, places=6)\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 90.0\n c.setResolution(imath.V2i(200, 100))\n self.assertEqual(c.getAperture(), imath.V2f(4, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.setPixelAspectRatio(2)\n self.assertEqual(c.getAperture(), imath.V2f(8, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n c.parameters()['projection:fov'] = 90.0\n c.setResolution(imath.V2i(100, 100))\n c.setPixelAspectRatio(1)\n c.parameters()['screenWindow'] = imath.Box2f(imath.V2f(10, -3),\n imath.V2f(11, 5))\n self.assertEqual(c.getAperture(), imath.V2f(1, 8))\n self.assertEqual(c.getApertureOffset(), imath.V2f(10.5, 1))\n c.parameters()['screenWindow'] = 
imath.Box2f(imath.V2f(-1), imath.\n V2f(1))\n c.setFocalLength(35)\n self.assertEqual(c.getAperture(), imath.V2f(70, 70))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n\n def testRenderOverrides(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.hasFilmFit(), False)\n self.assertEqual(c.hasResolution(), False)\n self.assertEqual(c.hasPixelAspectRatio(), False)\n self.assertEqual(c.hasResolutionMultiplier(), False)\n self.assertEqual(c.hasOverscan(), False)\n self.assertEqual(c.hasOverscanLeft(), False)\n self.assertEqual(c.hasOverscanRight(), False)\n self.assertEqual(c.hasOverscanTop(), False)\n self.assertEqual(c.hasOverscanBottom(), False)\n self.assertEqual(c.hasCropWindow(), False)\n self.assertEqual(c.hasShutter(), False)\n c.setFilmFit(IECoreScene.Camera.FilmFit.Vertical)\n c.setResolution(imath.V2i(1280, 720))\n c.setPixelAspectRatio(2)\n c.setResolutionMultiplier(0.5)\n c.setOverscan(True)\n c.setOverscanLeft(0.2)\n c.setOverscanRight(0.1)\n c.setOverscanTop(0.3)\n c.setOverscanBottom(0.4)\n c.setCropWindow(imath.Box2f(imath.V2f(0.1, 0.2), imath.V2f(0.8, 0.9)))\n c.setShutter(imath.V2f(-0.7, 0.3))\n self.assertEqual(c.hasFilmFit(), True)\n self.assertEqual(c.hasResolution(), True)\n self.assertEqual(c.hasPixelAspectRatio(), True)\n self.assertEqual(c.hasResolutionMultiplier(), True)\n self.assertEqual(c.hasOverscan(), True)\n self.assertEqual(c.hasOverscanLeft(), True)\n self.assertEqual(c.hasOverscanRight(), True)\n self.assertEqual(c.hasOverscanTop(), True)\n self.assertEqual(c.hasOverscanBottom(), True)\n self.assertEqual(c.hasCropWindow(), True)\n self.assertEqual(c.hasShutter(), True)\n self.assertEqual(c.getFilmFit(), IECoreScene.Camera.FilmFit.Vertical)\n self.assertEqual(c.getResolution(), imath.V2i(1280, 720))\n self.assertEqual(c.getPixelAspectRatio(), 2)\n self.assertEqual(c.getResolutionMultiplier(), 0.5)\n self.assertEqual(c.getOverscan(), True)\n self.assertAlmostEqual(c.getOverscanLeft(), 0.2)\n self.assertAlmostEqual(c.getOverscanRight(), 0.1)\n self.assertAlmostEqual(c.getOverscanTop(), 0.3)\n self.assertAlmostEqual(c.getOverscanBottom(), 0.4)\n self.assertBox2fEqual(c.getCropWindow(), 0.1, 0.2, 0.8, 0.9)\n self.assertAlmostEqual(c.getShutter(), imath.V2f(-0.7, 0.3))\n c.removeFilmFit()\n c.removeResolution()\n c.removePixelAspectRatio()\n c.removeResolutionMultiplier()\n c.removeOverscan()\n c.removeOverscanLeft()\n c.removeOverscanRight()\n c.removeOverscanTop()\n c.removeOverscanBottom()\n c.removeCropWindow()\n c.removeShutter()\n self.assertEqual(c.hasFilmFit(), False)\n self.assertEqual(c.hasResolution(), False)\n self.assertEqual(c.hasPixelAspectRatio(), False)\n self.assertEqual(c.hasResolutionMultiplier(), False)\n self.assertEqual(c.hasOverscan(), False)\n self.assertEqual(c.hasOverscanLeft(), False)\n self.assertEqual(c.hasOverscanRight(), False)\n self.assertEqual(c.hasOverscanTop(), False)\n self.assertEqual(c.hasOverscanBottom(), False)\n self.assertEqual(c.hasCropWindow(), False)\n self.assertEqual(c.hasShutter(), False)\n\n def testHash(self):\n c = IECoreScene.Camera()\n h = c.hash()\n c.setFocalLength(12)\n self.assertNotEqual(c.hash(), h)\n h = c.hash()\n\n def testNormalizedScreenWindow(self):\n c = IECoreScene.Camera()\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setFocalLength(2)\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setProjection('perspective')\n self.assertBox2fEqual(c.frustum(), -0.5, -0.375, 0.5, 0.375)\n c.setAperture(imath.V2f(4, 4))\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 
1, 0.75)\n c.setApertureOffset(imath.V2f(1, 1))\n self.assertBox2fEqual(c.frustum(), -0.5, -0.25, 1.5, 1.25)\n c.setFocalLength(1)\n self.assertBox2fEqual(c.frustum(), -1, -0.5, 3, 2.5)\n c.setResolution(imath.V2i(100, 100))\n self.assertBox2fEqual(c.frustum(), -1, -1, 3, 3)\n\n def testRenderImageSpec(self):\n\n def B(x1, y1, x2, y2):\n return imath.Box2i(imath.V2i(x1, y1), imath.V2i(x2, y2))\n c = IECoreScene.Camera()\n self.assertEqual(c.renderResolution(), imath.V2i(640, 480))\n self.assertEqual(c.renderRegion(), B(0, 0, 640, 480))\n c.setResolution(imath.V2i(1920, 1080))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setOverscanLeft(0.1)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setOverscan(True)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(-192, 0, 1920, 1080))\n c.setOverscanRight(1.0)\n c.setOverscanTop(0.5)\n c.setOverscanBottom(0.25)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(-192, -270, 3840, 1620))\n c.setCropWindow(imath.Box2f(imath.V2f(0, 0), imath.V2f(1, 1)))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setCropWindow(imath.Box2f(imath.V2f(0.2, 0.3), imath.V2f(0.8, 0.5)))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(384, 1080 - 540, 1536, 1080 - 324)\n )\n\n def testFitWindow(self):\n\n def B(x1, y1, x2, y2):\n return imath.Box2f(imath.V2f(x1, y1), imath.V2f(x2, y2))\n FitMode = IECoreScene.Camera.FilmFit\n cc = IECoreScene.Camera\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Horizontal, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Distort,\n 1.0), -2, -1, 2, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Vertical, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Distort,\n 1.0), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 0.5), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 0.5), -0.5, -1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 0.5), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 0.5), -0.5, 
-1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 0.5), -1, -1, 1, 1)\n\n def tearDown(self):\n if os.path.isfile(os.path.join('test', 'IECore', 'data', 'camera.cob')\n ):\n os.remove(os.path.join('test', 'IECore', 'data', 'camera.cob'))\n\n\n<code token>\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n\n def test(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.parameters(), IECore.CompoundData())\n cc = c.copy()\n self.assertEqual(cc.parameters(), IECore.CompoundData())\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(c, ccc)\n c.setFocalLength(5)\n self.assertEqual(c.getFocalLength(), 5)\n cc = c.copy()\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(ccc, c)\n\n def testCameraParameters(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.getProjection(), 'orthographic')\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n self.assertEqual(c.getFocalLength(), 1)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(0.01, 100000))\n self.assertEqual(c.getFStop(), 0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.1)\n self.assertEqual(c.getFocusDistance(), 1)\n c.setProjection('perspective')\n c.setAperture(imath.V2f(36, 24))\n c.setApertureOffset(imath.V2f(1, -1))\n c.setFocalLength(35)\n c.setClippingPlanes(imath.V2f(-10, 42))\n c.setFStop(3.0)\n c.setFocalLengthWorldScale(0.001)\n c.setFocusDistance(12.0)\n self.assertEqual(c.getProjection(), 'perspective')\n self.assertEqual(c.getAperture(), imath.V2f(36, 24))\n self.assertEqual(c.getApertureOffset(), imath.V2f(1, -1))\n self.assertEqual(c.getFocalLength(), 35)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(-10, 42))\n self.assertEqual(c.getFStop(), 3.0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.001)\n self.assertEqual(c.getFocusDistance(), 12.0)\n <function token>\n\n def testRenderOverrides(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.hasFilmFit(), False)\n self.assertEqual(c.hasResolution(), False)\n self.assertEqual(c.hasPixelAspectRatio(), False)\n self.assertEqual(c.hasResolutionMultiplier(), False)\n self.assertEqual(c.hasOverscan(), False)\n self.assertEqual(c.hasOverscanLeft(), False)\n self.assertEqual(c.hasOverscanRight(), False)\n self.assertEqual(c.hasOverscanTop(), False)\n self.assertEqual(c.hasOverscanBottom(), False)\n self.assertEqual(c.hasCropWindow(), False)\n self.assertEqual(c.hasShutter(), False)\n c.setFilmFit(IECoreScene.Camera.FilmFit.Vertical)\n c.setResolution(imath.V2i(1280, 720))\n c.setPixelAspectRatio(2)\n c.setResolutionMultiplier(0.5)\n c.setOverscan(True)\n c.setOverscanLeft(0.2)\n c.setOverscanRight(0.1)\n c.setOverscanTop(0.3)\n c.setOverscanBottom(0.4)\n c.setCropWindow(imath.Box2f(imath.V2f(0.1, 0.2), imath.V2f(0.8, 0.9)))\n c.setShutter(imath.V2f(-0.7, 0.3))\n self.assertEqual(c.hasFilmFit(), True)\n self.assertEqual(c.hasResolution(), True)\n self.assertEqual(c.hasPixelAspectRatio(), True)\n self.assertEqual(c.hasResolutionMultiplier(), True)\n self.assertEqual(c.hasOverscan(), True)\n self.assertEqual(c.hasOverscanLeft(), True)\n self.assertEqual(c.hasOverscanRight(), True)\n self.assertEqual(c.hasOverscanTop(), True)\n self.assertEqual(c.hasOverscanBottom(), 
True)\n self.assertEqual(c.hasCropWindow(), True)\n self.assertEqual(c.hasShutter(), True)\n self.assertEqual(c.getFilmFit(), IECoreScene.Camera.FilmFit.Vertical)\n self.assertEqual(c.getResolution(), imath.V2i(1280, 720))\n self.assertEqual(c.getPixelAspectRatio(), 2)\n self.assertEqual(c.getResolutionMultiplier(), 0.5)\n self.assertEqual(c.getOverscan(), True)\n self.assertAlmostEqual(c.getOverscanLeft(), 0.2)\n self.assertAlmostEqual(c.getOverscanRight(), 0.1)\n self.assertAlmostEqual(c.getOverscanTop(), 0.3)\n self.assertAlmostEqual(c.getOverscanBottom(), 0.4)\n self.assertBox2fEqual(c.getCropWindow(), 0.1, 0.2, 0.8, 0.9)\n self.assertAlmostEqual(c.getShutter(), imath.V2f(-0.7, 0.3))\n c.removeFilmFit()\n c.removeResolution()\n c.removePixelAspectRatio()\n c.removeResolutionMultiplier()\n c.removeOverscan()\n c.removeOverscanLeft()\n c.removeOverscanRight()\n c.removeOverscanTop()\n c.removeOverscanBottom()\n c.removeCropWindow()\n c.removeShutter()\n self.assertEqual(c.hasFilmFit(), False)\n self.assertEqual(c.hasResolution(), False)\n self.assertEqual(c.hasPixelAspectRatio(), False)\n self.assertEqual(c.hasResolutionMultiplier(), False)\n self.assertEqual(c.hasOverscan(), False)\n self.assertEqual(c.hasOverscanLeft(), False)\n self.assertEqual(c.hasOverscanRight(), False)\n self.assertEqual(c.hasOverscanTop(), False)\n self.assertEqual(c.hasOverscanBottom(), False)\n self.assertEqual(c.hasCropWindow(), False)\n self.assertEqual(c.hasShutter(), False)\n\n def testHash(self):\n c = IECoreScene.Camera()\n h = c.hash()\n c.setFocalLength(12)\n self.assertNotEqual(c.hash(), h)\n h = c.hash()\n\n def testNormalizedScreenWindow(self):\n c = IECoreScene.Camera()\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setFocalLength(2)\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setProjection('perspective')\n self.assertBox2fEqual(c.frustum(), -0.5, -0.375, 0.5, 0.375)\n c.setAperture(imath.V2f(4, 4))\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setApertureOffset(imath.V2f(1, 1))\n self.assertBox2fEqual(c.frustum(), -0.5, -0.25, 1.5, 1.25)\n c.setFocalLength(1)\n self.assertBox2fEqual(c.frustum(), -1, -0.5, 3, 2.5)\n c.setResolution(imath.V2i(100, 100))\n self.assertBox2fEqual(c.frustum(), -1, -1, 3, 3)\n\n def testRenderImageSpec(self):\n\n def B(x1, y1, x2, y2):\n return imath.Box2i(imath.V2i(x1, y1), imath.V2i(x2, y2))\n c = IECoreScene.Camera()\n self.assertEqual(c.renderResolution(), imath.V2i(640, 480))\n self.assertEqual(c.renderRegion(), B(0, 0, 640, 480))\n c.setResolution(imath.V2i(1920, 1080))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setOverscanLeft(0.1)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setOverscan(True)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(-192, 0, 1920, 1080))\n c.setOverscanRight(1.0)\n c.setOverscanTop(0.5)\n c.setOverscanBottom(0.25)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(-192, -270, 3840, 1620))\n c.setCropWindow(imath.Box2f(imath.V2f(0, 0), imath.V2f(1, 1)))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setCropWindow(imath.Box2f(imath.V2f(0.2, 0.3), imath.V2f(0.8, 0.5)))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n 
self.assertEqual(c.renderRegion(), B(384, 1080 - 540, 1536, 1080 - 324)\n )\n\n def testFitWindow(self):\n\n def B(x1, y1, x2, y2):\n return imath.Box2f(imath.V2f(x1, y1), imath.V2f(x2, y2))\n FitMode = IECoreScene.Camera.FilmFit\n cc = IECoreScene.Camera\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Horizontal, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Distort,\n 1.0), -2, -1, 2, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Vertical, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Distort,\n 1.0), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 0.5), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 0.5), -0.5, -1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 0.5), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 0.5), -0.5, -1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 0.5), -1, -1, 1, 1)\n\n def tearDown(self):\n if os.path.isfile(os.path.join('test', 'IECore', 'data', 'camera.cob')\n ):\n os.remove(os.path.join('test', 'IECore', 'data', 'camera.cob'))\n\n\n<code token>\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n\n def test(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.parameters(), IECore.CompoundData())\n cc = c.copy()\n self.assertEqual(cc.parameters(), IECore.CompoundData())\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(c, ccc)\n c.setFocalLength(5)\n self.assertEqual(c.getFocalLength(), 5)\n cc = c.copy()\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(ccc, c)\n\n def testCameraParameters(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.getProjection(), 'orthographic')\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n self.assertEqual(c.getFocalLength(), 1)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(0.01, 100000))\n self.assertEqual(c.getFStop(), 0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.1)\n self.assertEqual(c.getFocusDistance(), 1)\n c.setProjection('perspective')\n c.setAperture(imath.V2f(36, 24))\n c.setApertureOffset(imath.V2f(1, -1))\n c.setFocalLength(35)\n c.setClippingPlanes(imath.V2f(-10, 42))\n c.setFStop(3.0)\n c.setFocalLengthWorldScale(0.001)\n c.setFocusDistance(12.0)\n self.assertEqual(c.getProjection(), 'perspective')\n self.assertEqual(c.getAperture(), imath.V2f(36, 24))\n self.assertEqual(c.getApertureOffset(), imath.V2f(1, -1))\n self.assertEqual(c.getFocalLength(), 35)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(-10, 42))\n self.assertEqual(c.getFStop(), 3.0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.001)\n self.assertEqual(c.getFocusDistance(), 12.0)\n <function token>\n <function token>\n\n def testHash(self):\n c = IECoreScene.Camera()\n h = c.hash()\n c.setFocalLength(12)\n self.assertNotEqual(c.hash(), h)\n h = c.hash()\n\n def testNormalizedScreenWindow(self):\n c = IECoreScene.Camera()\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setFocalLength(2)\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setProjection('perspective')\n self.assertBox2fEqual(c.frustum(), -0.5, -0.375, 0.5, 0.375)\n c.setAperture(imath.V2f(4, 4))\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setApertureOffset(imath.V2f(1, 1))\n self.assertBox2fEqual(c.frustum(), -0.5, -0.25, 1.5, 1.25)\n c.setFocalLength(1)\n self.assertBox2fEqual(c.frustum(), -1, -0.5, 3, 2.5)\n c.setResolution(imath.V2i(100, 100))\n self.assertBox2fEqual(c.frustum(), -1, -1, 3, 3)\n\n def testRenderImageSpec(self):\n\n def B(x1, y1, x2, y2):\n return imath.Box2i(imath.V2i(x1, y1), imath.V2i(x2, y2))\n c = IECoreScene.Camera()\n self.assertEqual(c.renderResolution(), imath.V2i(640, 480))\n self.assertEqual(c.renderRegion(), B(0, 0, 640, 480))\n c.setResolution(imath.V2i(1920, 1080))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setOverscanLeft(0.1)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), 
B(0, 0, 1920, 1080))\n c.setOverscan(True)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(-192, 0, 1920, 1080))\n c.setOverscanRight(1.0)\n c.setOverscanTop(0.5)\n c.setOverscanBottom(0.25)\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(-192, -270, 3840, 1620))\n c.setCropWindow(imath.Box2f(imath.V2f(0, 0), imath.V2f(1, 1)))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(0, 0, 1920, 1080))\n c.setCropWindow(imath.Box2f(imath.V2f(0.2, 0.3), imath.V2f(0.8, 0.5)))\n self.assertEqual(c.renderResolution(), imath.V2i(1920, 1080))\n self.assertEqual(c.renderRegion(), B(384, 1080 - 540, 1536, 1080 - 324)\n )\n\n def testFitWindow(self):\n\n def B(x1, y1, x2, y2):\n return imath.Box2f(imath.V2f(x1, y1), imath.V2f(x2, y2))\n FitMode = IECoreScene.Camera.FilmFit\n cc = IECoreScene.Camera\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Horizontal, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Distort,\n 1.0), -2, -1, 2, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Vertical, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Distort,\n 1.0), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 0.5), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 0.5), -0.5, -1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 0.5), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 0.5), -0.5, -1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 0.5), -1, -1, 1, 1)\n\n def tearDown(self):\n if os.path.isfile(os.path.join('test', 'IECore', 'data', 'camera.cob')\n ):\n os.remove(os.path.join('test', 'IECore', 'data', 'camera.cob'))\n\n\n<code token>\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n\n def test(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.parameters(), IECore.CompoundData())\n cc = c.copy()\n self.assertEqual(cc.parameters(), IECore.CompoundData())\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(c, ccc)\n c.setFocalLength(5)\n self.assertEqual(c.getFocalLength(), 5)\n cc = c.copy()\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(ccc, c)\n\n def testCameraParameters(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.getProjection(), 'orthographic')\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n self.assertEqual(c.getFocalLength(), 1)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(0.01, 100000))\n self.assertEqual(c.getFStop(), 0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.1)\n self.assertEqual(c.getFocusDistance(), 1)\n c.setProjection('perspective')\n c.setAperture(imath.V2f(36, 24))\n c.setApertureOffset(imath.V2f(1, -1))\n c.setFocalLength(35)\n c.setClippingPlanes(imath.V2f(-10, 42))\n c.setFStop(3.0)\n c.setFocalLengthWorldScale(0.001)\n c.setFocusDistance(12.0)\n self.assertEqual(c.getProjection(), 'perspective')\n self.assertEqual(c.getAperture(), imath.V2f(36, 24))\n self.assertEqual(c.getApertureOffset(), imath.V2f(1, -1))\n self.assertEqual(c.getFocalLength(), 35)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(-10, 42))\n self.assertEqual(c.getFStop(), 3.0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.001)\n self.assertEqual(c.getFocusDistance(), 12.0)\n <function token>\n <function token>\n\n def testHash(self):\n c = IECoreScene.Camera()\n h = c.hash()\n c.setFocalLength(12)\n self.assertNotEqual(c.hash(), h)\n h = c.hash()\n\n def testNormalizedScreenWindow(self):\n c = IECoreScene.Camera()\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setFocalLength(2)\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setProjection('perspective')\n self.assertBox2fEqual(c.frustum(), -0.5, -0.375, 0.5, 0.375)\n c.setAperture(imath.V2f(4, 4))\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setApertureOffset(imath.V2f(1, 1))\n self.assertBox2fEqual(c.frustum(), -0.5, -0.25, 1.5, 1.25)\n c.setFocalLength(1)\n self.assertBox2fEqual(c.frustum(), -1, -0.5, 3, 2.5)\n c.setResolution(imath.V2i(100, 100))\n self.assertBox2fEqual(c.frustum(), -1, -1, 3, 3)\n <function token>\n\n def testFitWindow(self):\n\n def B(x1, y1, x2, y2):\n return imath.Box2f(imath.V2f(x1, y1), imath.V2f(x2, y2))\n FitMode = IECoreScene.Camera.FilmFit\n cc = IECoreScene.Camera\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 
1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Horizontal, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.\n Vertical, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-2, -1, 2, 1), FitMode.Distort,\n 1.0), -2, -1, 2, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Horizontal, 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.\n Vertical, 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fit, \n 1.0), -2, -2, 2, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Fill, \n 1.0), -1, -1, 1, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -2, 1, 2), FitMode.Distort,\n 1.0), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Horizontal, 0.5), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.\n Vertical, 0.5), -0.5, -1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fit, \n 0.5), -1, -2, 1, 2)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Fill, \n 0.5), -0.5, -1, 0.5, 1)\n self.assertBox2fEqual(cc.fitWindow(B(-1, -1, 1, 1), FitMode.Distort,\n 0.5), -1, -1, 1, 1)\n\n def tearDown(self):\n if os.path.isfile(os.path.join('test', 'IECore', 'data', 'camera.cob')\n ):\n os.remove(os.path.join('test', 'IECore', 'data', 'camera.cob'))\n\n\n<code token>\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n\n def test(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.parameters(), IECore.CompoundData())\n cc = c.copy()\n self.assertEqual(cc.parameters(), IECore.CompoundData())\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(c, ccc)\n c.setFocalLength(5)\n self.assertEqual(c.getFocalLength(), 5)\n cc = c.copy()\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(ccc, c)\n\n def testCameraParameters(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.getProjection(), 'orthographic')\n self.assertEqual(c.getAperture(), imath.V2f(2, 2))\n self.assertEqual(c.getApertureOffset(), imath.V2f(0, 0))\n self.assertEqual(c.getFocalLength(), 1)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(0.01, 100000))\n self.assertEqual(c.getFStop(), 0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.1)\n self.assertEqual(c.getFocusDistance(), 1)\n c.setProjection('perspective')\n c.setAperture(imath.V2f(36, 24))\n c.setApertureOffset(imath.V2f(1, -1))\n c.setFocalLength(35)\n c.setClippingPlanes(imath.V2f(-10, 42))\n c.setFStop(3.0)\n c.setFocalLengthWorldScale(0.001)\n c.setFocusDistance(12.0)\n self.assertEqual(c.getProjection(), 'perspective')\n self.assertEqual(c.getAperture(), imath.V2f(36, 24))\n self.assertEqual(c.getApertureOffset(), imath.V2f(1, -1))\n self.assertEqual(c.getFocalLength(), 35)\n self.assertEqual(c.getClippingPlanes(), imath.V2f(-10, 42))\n self.assertEqual(c.getFStop(), 3.0)\n self.assertAlmostEqual(c.getFocalLengthWorldScale(), 0.001)\n self.assertEqual(c.getFocusDistance(), 12.0)\n <function token>\n <function token>\n\n def testHash(self):\n c = IECoreScene.Camera()\n h = c.hash()\n c.setFocalLength(12)\n self.assertNotEqual(c.hash(), h)\n h = c.hash()\n\n def testNormalizedScreenWindow(self):\n c = IECoreScene.Camera()\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setFocalLength(2)\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setProjection('perspective')\n self.assertBox2fEqual(c.frustum(), -0.5, -0.375, 0.5, 0.375)\n c.setAperture(imath.V2f(4, 4))\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setApertureOffset(imath.V2f(1, 1))\n self.assertBox2fEqual(c.frustum(), -0.5, -0.25, 1.5, 1.25)\n c.setFocalLength(1)\n self.assertBox2fEqual(c.frustum(), -1, -0.5, 3, 2.5)\n c.setResolution(imath.V2i(100, 100))\n self.assertBox2fEqual(c.frustum(), -1, -1, 3, 3)\n <function token>\n <function token>\n\n def tearDown(self):\n if os.path.isfile(os.path.join('test', 'IECore', 'data', 'camera.cob')\n ):\n os.remove(os.path.join('test', 'IECore', 'data', 'camera.cob'))\n\n\n<code token>\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n\n def test(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.parameters(), IECore.CompoundData())\n cc = c.copy()\n self.assertEqual(cc.parameters(), IECore.CompoundData())\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(c, ccc)\n c.setFocalLength(5)\n self.assertEqual(c.getFocalLength(), 5)\n cc = c.copy()\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(ccc, c)\n <function token>\n <function token>\n <function token>\n\n def testHash(self):\n c = IECoreScene.Camera()\n h = c.hash()\n c.setFocalLength(12)\n self.assertNotEqual(c.hash(), h)\n h = c.hash()\n\n def testNormalizedScreenWindow(self):\n c = IECoreScene.Camera()\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setFocalLength(2)\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setProjection('perspective')\n self.assertBox2fEqual(c.frustum(), -0.5, -0.375, 0.5, 0.375)\n c.setAperture(imath.V2f(4, 4))\n self.assertBox2fEqual(c.frustum(), -1, -0.75, 1, 0.75)\n c.setApertureOffset(imath.V2f(1, 1))\n self.assertBox2fEqual(c.frustum(), -0.5, -0.25, 1.5, 1.25)\n c.setFocalLength(1)\n self.assertBox2fEqual(c.frustum(), -1, -0.5, 3, 2.5)\n c.setResolution(imath.V2i(100, 100))\n self.assertBox2fEqual(c.frustum(), -1, -1, 3, 3)\n <function token>\n <function token>\n\n def tearDown(self):\n if os.path.isfile(os.path.join('test', 'IECore', 'data', 'camera.cob')\n ):\n os.remove(os.path.join('test', 'IECore', 'data', 'camera.cob'))\n\n\n<code token>\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n\n def test(self):\n c = IECoreScene.Camera()\n self.assertEqual(c.parameters(), IECore.CompoundData())\n cc = c.copy()\n self.assertEqual(cc.parameters(), IECore.CompoundData())\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(c, ccc)\n c.setFocalLength(5)\n self.assertEqual(c.getFocalLength(), 5)\n cc = c.copy()\n self.assertEqual(cc, c)\n IECore.Writer.create(cc, os.path.join('test', 'IECore', 'data',\n 'camera.cob')).write()\n ccc = IECore.Reader.create(os.path.join('test', 'IECore', 'data',\n 'camera.cob')).read()\n self.assertEqual(ccc, c)\n <function token>\n <function token>\n <function token>\n\n def testHash(self):\n c = IECoreScene.Camera()\n h = c.hash()\n c.setFocalLength(12)\n self.assertNotEqual(c.hash(), h)\n h = c.hash()\n <function token>\n <function token>\n <function token>\n\n def tearDown(self):\n if os.path.isfile(os.path.join('test', 'IECore', 'data', 'camera.cob')\n ):\n os.remove(os.path.join('test', 'IECore', 'data', 'camera.cob'))\n\n\n<code token>\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def testHash(self):\n c = IECoreScene.Camera()\n h = c.hash()\n c.setFocalLength(12)\n self.assertNotEqual(c.hash(), h)\n h = c.hash()\n <function token>\n <function token>\n <function token>\n\n def tearDown(self):\n if os.path.isfile(os.path.join('test', 'IECore', 'data', 'camera.cob')\n ):\n os.remove(os.path.join('test', 'IECore', 'data', 'camera.cob'))\n\n\n<code token>\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n <function token>\n <function token>\n <function token>\n <function token>\n\n def testHash(self):\n c = IECoreScene.Camera()\n h = c.hash()\n c.setFocalLength(12)\n self.assertNotEqual(c.hash(), h)\n h = c.hash()\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n\n def assertBox2fEqual(self, box, x1, y1, x2, y2):\n self.assertAlmostEqual(box.min().x, x1)\n self.assertAlmostEqual(box.min().y, y1)\n self.assertAlmostEqual(box.max().x, x2)\n self.assertAlmostEqual(box.max().y, y2)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass TestCamera(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
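The testFitWindow expectations in the record above pin down the fit behaviour precisely, so it can be restated as a tiny pure-Python sketch. This is inferred from the test data only, not from IECoreScene's actual implementation; boxes are plain (x1, y1, x2, y2) tuples and only the centred windows the tests exercise are considered.

# Minimal sketch of the fitWindow behaviour implied by the expectations above.
def fit_window(box, mode, aspect):
    x1, y1, x2, y2 = box
    cx, cy = (x1 + x2) / 2.0, (y1 + y2) / 2.0
    hw, hh = (x2 - x1) / 2.0, (y2 - y1) / 2.0

    def horizontal():  # keep the width, derive the height from the target aspect
        return (cx - hw, cy - hw / aspect, cx + hw, cy + hw / aspect)

    def vertical():    # keep the height, derive the width from the target aspect
        return (cx - hh * aspect, cy - hh, cx + hh * aspect, cy + hh)

    if mode == 'Horizontal':
        return horizontal()
    if mode == 'Vertical':
        return vertical()
    if mode in ('Fit', 'Fill'):
        h, v = horizontal(), vertical()
        area = lambda b: (b[2] - b[0]) * (b[3] - b[1])
        # Fit returns the candidate that contains the window; Fill the one it contains.
        return max((h, v), key=area) if mode == 'Fit' else min((h, v), key=area)
    return box  # Distort leaves the window untouched

assert fit_window((-1, -1, 1, 1), 'Horizontal', 0.5) == (-1, -2, 1, 2)
assert fit_window((-2, -1, 2, 1), 'Fill', 1.0) == (-1, -1, 1, 1)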
98,458 |
ba4999f8f1fb55a652d5ab88c1df4e6784b8e110
|
from rest_framework import serializers
from .models import Order, Product
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields = ['id', 'product_id', 'qty', 'price', 'shop_id', 'customer_id']
class NewOrderSerializer(serializers.ModelSerializer):
vip = serializers.BooleanField()
class Meta:
model = Order
fields = ['product_id', 'qty', 'customer_id', 'vip']
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ['product_id', 'stock_pcs', 'price', 'shop_id', 'vip']
|
[
"from rest_framework import serializers\nfrom .models import Order, Product\n\nclass OrderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Order\n fields = ['id', 'product_id', 'qty', 'price', 'shop_id', 'customer_id']\n\n\nclass NewOrderSerializer(serializers.ModelSerializer):\n vip = serializers.BooleanField()\n\n class Meta:\n model = Order\n fields = ['product_id', 'qty', 'customer_id', 'vip']\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ['product_id', 'stock_pcs', 'price', 'shop_id', 'vip']\n \n\n\n\n \n",
"from rest_framework import serializers\nfrom .models import Order, Product\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Order\n fields = ['id', 'product_id', 'qty', 'price', 'shop_id', 'customer_id']\n\n\nclass NewOrderSerializer(serializers.ModelSerializer):\n vip = serializers.BooleanField()\n\n\n class Meta:\n model = Order\n fields = ['product_id', 'qty', 'customer_id', 'vip']\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['product_id', 'stock_pcs', 'price', 'shop_id', 'vip']\n",
"<import token>\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Order\n fields = ['id', 'product_id', 'qty', 'price', 'shop_id', 'customer_id']\n\n\nclass NewOrderSerializer(serializers.ModelSerializer):\n vip = serializers.BooleanField()\n\n\n class Meta:\n model = Order\n fields = ['product_id', 'qty', 'customer_id', 'vip']\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['product_id', 'stock_pcs', 'price', 'shop_id', 'vip']\n",
"<import token>\n<class token>\n\n\nclass NewOrderSerializer(serializers.ModelSerializer):\n vip = serializers.BooleanField()\n\n\n class Meta:\n model = Order\n fields = ['product_id', 'qty', 'customer_id', 'vip']\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['product_id', 'stock_pcs', 'price', 'shop_id', 'vip']\n",
"<import token>\n<class token>\n\n\nclass NewOrderSerializer(serializers.ModelSerializer):\n <assignment token>\n\n\n class Meta:\n model = Order\n fields = ['product_id', 'qty', 'customer_id', 'vip']\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['product_id', 'stock_pcs', 'price', 'shop_id', 'vip']\n",
"<import token>\n<class token>\n<class token>\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['product_id', 'stock_pcs', 'price', 'shop_id', 'vip']\n",
"<import token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,459 |
df57d8e2733f218fa60c4ebc4053ed093287967c
|
# learning command line arguments usage
import sys
print ('The command line args are')
for i in sys.argv:
print(i)
print('Python paths:',sys.path, '\n')
|
[
"# learning command line arguments usage\n\nimport sys\n\nprint ('The command line args are')\n\nfor i in sys.argv:\n\tprint(i)\n\nprint('Python paths:',sys.path, '\\n')\n",
"import sys\nprint('The command line args are')\nfor i in sys.argv:\n print(i)\nprint('Python paths:', sys.path, '\\n')\n",
"<import token>\nprint('The command line args are')\nfor i in sys.argv:\n print(i)\nprint('Python paths:', sys.path, '\\n')\n",
"<import token>\n<code token>\n"
] | false |
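For reference, the record above only does something interesting when arguments are supplied at invocation; the snippet below simulates such a run without a shell. The script name and arguments are made up.

import sys

sys.argv = ['args_demo.py', 'one', 'two']  # what `python args_demo.py one two` would yield
print('The command line args are')
for i in sys.argv:
    print(i)  # prints args_demo.py, one, two in turn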
98,460 |
aa81acc4b10be3031684a806608fa6c82c39bda1
|
import justpy as jp
import random
async def my_click(self, msg):
self.color = random.choice(['primary', 'secondary', 'accent', 'dark', 'positive',
'negative','info', 'warning'])
self.label = self.color
msg.page.dark = not msg.page.dark
await msg.page.set_dark_mode(msg.page.dark)
html_string = """
<div class="q-pa-md q-gutter-y-sm">
<q-toolbar class="text-primary">
<q-btn flat round dense icon="menu" />
<q-toolbar-title>
Toolbar
</q-toolbar-title>
<q-btn flat round dense icon="more_vert" />
</q-toolbar>
"""
def quasar_example():
wp = jp.QuasarPage(dark=True) # Load page in dark mode
# bar = jp.Div(classes='q-pa-md q-gutter-y-sm', a=wp)
# jp.Qtoolbar(classes='text-primary', a=bar)
c = jp.parse_html(html_string, a=wp)
d = jp.Div(classes='q-pa-md q-gutter-sm', a=wp)
jp.QBtn(color='primary', icon='mail', label='On Left',
a=d, click=my_click)
jp.QBtn(color='secondary', icon_right='mail', label='On Right',
a=d, click=my_click)
jp.QBtn(color='red', icon='mail', icon_right='send',
label='On Left and Right', a=d, click=my_click)
jp.Br(a=d)
jp.QBtn(icon='phone', label='Stacked', stack=True, glossy=True, color='purple', a=d, click=my_click)
return wp
jp.justpy(quasar_example)
|
[
"import justpy as jp\nimport random\n\nasync def my_click(self, msg):\n self.color = random.choice(['primary', 'secondary', 'accent', 'dark', 'positive',\n 'negative','info', 'warning'])\n self.label = self.color\n msg.page.dark = not msg.page.dark\n await msg.page.set_dark_mode(msg.page.dark)\n\nhtml_string = \"\"\"\n <div class=\"q-pa-md q-gutter-y-sm\">\n <q-toolbar class=\"text-primary\">\n <q-btn flat round dense icon=\"menu\" />\n <q-toolbar-title>\n Toolbar\n </q-toolbar-title>\n <q-btn flat round dense icon=\"more_vert\" />\n </q-toolbar>\n\"\"\"\n\n\ndef quasar_example():\n wp = jp.QuasarPage(dark=True) # Load page in dark mode\n # bar = jp.Div(classes='q-pa-md q-gutter-y-sm', a=wp)\n # jp.Qtoolbar(classes='text-primary', a=bar)\n c = jp.parse_html(html_string, a=wp)\n\n d = jp.Div(classes='q-pa-md q-gutter-sm', a=wp)\n \n jp.QBtn(color='primary', icon='mail', label='On Left',\n a=d, click=my_click)\n jp.QBtn(color='secondary', icon_right='mail', label='On Right',\n a=d, click=my_click)\n jp.QBtn(color='red', icon='mail', icon_right='send',\n label='On Left and Right', a=d, click=my_click)\n jp.Br(a=d)\n jp.QBtn(icon='phone', label='Stacked', stack=True, glossy=True, color='purple', a=d, click=my_click)\n return wp\n\njp.justpy(quasar_example)\n",
"import justpy as jp\nimport random\n\n\nasync def my_click(self, msg):\n self.color = random.choice(['primary', 'secondary', 'accent', 'dark',\n 'positive', 'negative', 'info', 'warning'])\n self.label = self.color\n msg.page.dark = not msg.page.dark\n await msg.page.set_dark_mode(msg.page.dark)\n\n\nhtml_string = \"\"\"\n <div class=\"q-pa-md q-gutter-y-sm\">\n <q-toolbar class=\"text-primary\">\n <q-btn flat round dense icon=\"menu\" />\n <q-toolbar-title>\n Toolbar\n </q-toolbar-title>\n <q-btn flat round dense icon=\"more_vert\" />\n </q-toolbar>\n\"\"\"\n\n\ndef quasar_example():\n wp = jp.QuasarPage(dark=True)\n c = jp.parse_html(html_string, a=wp)\n d = jp.Div(classes='q-pa-md q-gutter-sm', a=wp)\n jp.QBtn(color='primary', icon='mail', label='On Left', a=d, click=my_click)\n jp.QBtn(color='secondary', icon_right='mail', label='On Right', a=d,\n click=my_click)\n jp.QBtn(color='red', icon='mail', icon_right='send', label=\n 'On Left and Right', a=d, click=my_click)\n jp.Br(a=d)\n jp.QBtn(icon='phone', label='Stacked', stack=True, glossy=True, color=\n 'purple', a=d, click=my_click)\n return wp\n\n\njp.justpy(quasar_example)\n",
"<import token>\n\n\nasync def my_click(self, msg):\n self.color = random.choice(['primary', 'secondary', 'accent', 'dark',\n 'positive', 'negative', 'info', 'warning'])\n self.label = self.color\n msg.page.dark = not msg.page.dark\n await msg.page.set_dark_mode(msg.page.dark)\n\n\nhtml_string = \"\"\"\n <div class=\"q-pa-md q-gutter-y-sm\">\n <q-toolbar class=\"text-primary\">\n <q-btn flat round dense icon=\"menu\" />\n <q-toolbar-title>\n Toolbar\n </q-toolbar-title>\n <q-btn flat round dense icon=\"more_vert\" />\n </q-toolbar>\n\"\"\"\n\n\ndef quasar_example():\n wp = jp.QuasarPage(dark=True)\n c = jp.parse_html(html_string, a=wp)\n d = jp.Div(classes='q-pa-md q-gutter-sm', a=wp)\n jp.QBtn(color='primary', icon='mail', label='On Left', a=d, click=my_click)\n jp.QBtn(color='secondary', icon_right='mail', label='On Right', a=d,\n click=my_click)\n jp.QBtn(color='red', icon='mail', icon_right='send', label=\n 'On Left and Right', a=d, click=my_click)\n jp.Br(a=d)\n jp.QBtn(icon='phone', label='Stacked', stack=True, glossy=True, color=\n 'purple', a=d, click=my_click)\n return wp\n\n\njp.justpy(quasar_example)\n",
"<import token>\n\n\nasync def my_click(self, msg):\n self.color = random.choice(['primary', 'secondary', 'accent', 'dark',\n 'positive', 'negative', 'info', 'warning'])\n self.label = self.color\n msg.page.dark = not msg.page.dark\n await msg.page.set_dark_mode(msg.page.dark)\n\n\n<assignment token>\n\n\ndef quasar_example():\n wp = jp.QuasarPage(dark=True)\n c = jp.parse_html(html_string, a=wp)\n d = jp.Div(classes='q-pa-md q-gutter-sm', a=wp)\n jp.QBtn(color='primary', icon='mail', label='On Left', a=d, click=my_click)\n jp.QBtn(color='secondary', icon_right='mail', label='On Right', a=d,\n click=my_click)\n jp.QBtn(color='red', icon='mail', icon_right='send', label=\n 'On Left and Right', a=d, click=my_click)\n jp.Br(a=d)\n jp.QBtn(icon='phone', label='Stacked', stack=True, glossy=True, color=\n 'purple', a=d, click=my_click)\n return wp\n\n\njp.justpy(quasar_example)\n",
"<import token>\n<code token>\n<assignment token>\n\n\ndef quasar_example():\n wp = jp.QuasarPage(dark=True)\n c = jp.parse_html(html_string, a=wp)\n d = jp.Div(classes='q-pa-md q-gutter-sm', a=wp)\n jp.QBtn(color='primary', icon='mail', label='On Left', a=d, click=my_click)\n jp.QBtn(color='secondary', icon_right='mail', label='On Right', a=d,\n click=my_click)\n jp.QBtn(color='red', icon='mail', icon_right='send', label=\n 'On Left and Right', a=d, click=my_click)\n jp.Br(a=d)\n jp.QBtn(icon='phone', label='Stacked', stack=True, glossy=True, color=\n 'purple', a=d, click=my_click)\n return wp\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n"
] | false |
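A stripped-down variant of the page in the record above, kept to the pieces the record itself demonstrates (jp.QuasarPage, jp.QBtn, and a click handler mutating self.color and self.label). Treat it as a sketch under those assumptions rather than canonical justpy usage.

import justpy as jp
import random

def cycle_color(self, msg):
    # same idea as my_click above, minus the dark-mode toggle
    self.color = random.choice(['primary', 'secondary', 'warning'])
    self.label = self.color

def tiny_page():
    wp = jp.QuasarPage()
    jp.QBtn(color='primary', label='click me', a=wp, click=cycle_color)
    return wp

# jp.justpy(tiny_page)  # left commented so the sketch does not start a server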
98,461 |
7b5e91ca7a4d74ff08a2070480ed84b012279f19
|
from Data import *
from PlotDynamicAllocation import *
from pygraph import *
my_nifty=Data("../python/NIFTYdata.csv");
my_bse=Data("../python/BSE30data.csv");
my_bank=Data("../python/BANKNIFTYdata.csv")
my_finance=Data("../python/CNXFINANCEdata.csv")
my_nifty.createEarningsArray()
my_nifty.createDividendArray()
my_bse.createEarningsArray()
my_bse.createDividendArray()
#my_bse.plotVsDate('dividend')
print my_nifty.DynamicAllocationVer2(my_bse, 1)
#print my_bse.DynamicAllocationVer2(my_nifty, 1)
#for i in range(15,20):
# my_nifty.investUnderPe(i);
#print percentile(my_bank.pe, 13)
#print percentile(my_bank.pe, 17)
#my_bse.plotVsDate('index');
#plotDynamicAllocation(my_nifty, my_bse)
#print my_finance.DynamicAllocation(13, 22, 1, 'pe')
#plotDiffBetweenFinanceNiftyPe(my_nifty, my_bank, my_finance)
#my_nifty.plotIndexWithPeHighlight(20, 15, 17, "NiftyWithPEHighlight.png")
|
[
"\nfrom Data import *\nfrom PlotDynamicAllocation import *\nfrom pygraph import *\n\nmy_nifty=Data(\"../python/NIFTYdata.csv\");\nmy_bse=Data(\"../python/BSE30data.csv\");\nmy_bank=Data(\"../python/BANKNIFTYdata.csv\")\nmy_finance=Data(\"../python/CNXFINANCEdata.csv\")\nmy_nifty.createEarningsArray()\nmy_nifty.createDividendArray()\nmy_bse.createEarningsArray()\nmy_bse.createDividendArray()\n\n#my_bse.plotVsDate('dividend')\n\nprint my_nifty.DynamicAllocationVer2(my_bse, 1)\n#print my_bse.DynamicAllocationVer2(my_nifty, 1)\n#for i in range(15,20):\n# my_nifty.investUnderPe(i);\n\n#print percentile(my_bank.pe, 13)\n#print percentile(my_bank.pe, 17)\n#my_bse.plotVsDate('index');\n#plotDynamicAllocation(my_nifty, my_bse)\n#print my_finance.DynamicAllocation(13, 22, 1, 'pe')\n\n#plotDiffBetweenFinanceNiftyPe(my_nifty, my_bank, my_finance)\n#my_nifty.plotIndexWithPeHighlight(20, 15, 17, \"NiftyWithPEHighlight.png\")\n"
] | true |
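This record's error flag is true and its steps list holds only the raw source, which matches the Python 2 print statements in the code (e.g. `print my_nifty.DynamicAllocationVer2(my_bse, 1)`) being unparseable under Python 3. That this is the cause is an inference, assuming the corpus pipeline parses sources with Python 3's ast module.

import ast

try:
    ast.parse("print my_nifty.DynamicAllocationVer2(my_bse, 1)")
except SyntaxError as e:
    print('unparseable under Python 3:', e.msg)  # Python 2 print statement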
98,462 |
840924b1f91d7115729cabc6a7a2905e8f775f01
|
# Looking at lambdas, you can see they are quite similar to list comprehensions
a = list(range(100))
# list comprehension style
print([i * i for i in a])
# lambda style
print(list(map(lambda number: number * number, a)))
# The list comprehension and the lambda version give identical results.
# To keep only the even numbers:
print([i * i for i in a if i % 2 == 0])
'''
So when should you use a list comprehension and when a lambda?
It comes down to personal preference.
Differences between list comprehensions and lambdas:
List comprehension
The result comes out as a list - naturally, since you wrap the expression in [] and fill it in.
That means one extra list is materialized, so it takes up more memory.
Even so, these days it is reportedly used even more often than lambdas.
Lambda
map(), filter() and other lambda-style functions are generator-style,
so the data inside does not actually occupy memory up front.
If you want to keep memory usage down, the lambda style can be the better choice.
'''
|
[
"\n# 람다를 보면 리스트 내포 방식과 유사한 것을 알 수 있다\n\na = list(range(100))\n# 리스트 내포 방식\nprint([i * i for i in a])\n# 람다식 사용 방식\nprint(list(map(lambda number: number * number, a)))\n\n# 리스트 내포와 람다식 처리 결과는 동일하다.\n\n# 짝수만 구하려고 한다면\nprint([i * i for i in a if i % 2 == 0])\n\n'''\n그럼 언제 리스트 내포 방식을 쓰고 언제 람다식을 써야 할까?\n\t개취이다..\n\n리스트 내포와 람다식 차이점\n\t리스트 내포\n\t\t결과가 리스트로 나온다..당연하지..[]로 감사고 처리하고 넣으니깐\n\t\t즉 이말은 리스트가 하나 더 복제되서 메모리를 더 차지한다는 의미이다.\n\t\t최근에는 오히려 람다식보다 더 많이 사용하는 구문이라고 한다.\n\t람다식\n\t\tmap(), filter() 등의 람다 함수는 제너레이터 함수라서 \n\t\t내부의 데이터가 실제로 메모리 용량을 차지하는 것들은 아닙니다.\n\t\t메모리 용량을 더 적게 사용하고 싶다면 람다식이 유리할 수 있겠다.\n'''",
"a = list(range(100))\nprint([(i * i) for i in a])\nprint(list(map(lambda number: number * number, a)))\nprint([(i * i) for i in a if i % 2 == 0])\n<docstring token>\n",
"<assignment token>\nprint([(i * i) for i in a])\nprint(list(map(lambda number: number * number, a)))\nprint([(i * i) for i in a if i % 2 == 0])\n<docstring token>\n",
"<assignment token>\n<code token>\n<docstring token>\n"
] | false |
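The memory claim in the translated docstring above can be checked directly: the comprehension materializes every element, while map() returns a lazy iterator of near-constant size. Exact byte counts vary by interpreter; the comments below are illustrative.

import sys

a = list(range(100))
squares_list = [i * i for i in a]                      # all 100 results stored
squares_iter = map(lambda number: number * number, a)  # lazy, computed on demand

print(sys.getsizeof(squares_list))  # hundreds of bytes for the materialized list
print(sys.getsizeof(squares_iter))  # a few dozen bytes, regardless of len(a)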
98,463 |
e22bf89ae7bed6722b9ef990c59857bbd45cb953
|
# correct solution to Google Code Jam 2014 Qualification - B
import sys
# input the number of test cases
testCases = int(raw_input())
kase = 1
def findOptimalTime(c, f, x):
currentRate = float(2.00)
timeSpent = float(0.00)
while True :
withFactory = (c/currentRate) + (x/(currentRate + f))
withoutFactory = x/currentRate
if (withFactory < withoutFactory):
timeSpent += (c/currentRate)
currentRate += f
else:
timeSpent += x/currentRate
break
return timeSpent
while (testCases > 0):
inputs = raw_input().split(" ")
c = float(inputs[0])
f = float(inputs[1])
x = float(inputs[2])
# format the output
bestTime = findOptimalTime(c,f,x)
outputString = "Case #{}: {}\n".format(kase,bestTime)
# render output
sys.stdout.write(outputString)
# decrease the testCase number by 1
testCases -= 1
kase += 1
|
[
"# correct solution to Google Code Jam 2014 Qualification - B\nimport sys\n\n# input the number of test cases\ntestCases = int(raw_input())\nkase = 1\n\ndef findOptimalTime(c, f, x):\n\tcurrentRate = float(2.00)\n\ttimeSpent = float(0.00)\n\n\twhile True :\n\t\twithFactory = (c/currentRate) + (x/(currentRate + f))\n\t\twithoutFactory = x/currentRate\n\n\t\tif (withFactory < withoutFactory):\n\t\t\ttimeSpent += (c/currentRate)\n\t\t\tcurrentRate += f\n\t\telse:\n\t\t\ttimeSpent += x/currentRate\n\t\t\tbreak\n\t\n\treturn timeSpent\n\t\n\t\n\nwhile (testCases > 0):\n\tinputs = raw_input().split(\" \")\n\tc = float(inputs[0])\n\tf = float(inputs[1])\n\tx = float(inputs[2])\n\t\n\t# format the output\n\tbestTime = findOptimalTime(c,f,x)\n\toutputString = \"Case #{}: {}\\n\".format(kase,bestTime)\n\n\t# render output\n\tsys.stdout.write(outputString)\n\n\t# decrease the testCase number by 1\n\ttestCases -= 1\n\tkase += 1\n\t\n\t\n\n\n",
"import sys\ntestCases = int(raw_input())\nkase = 1\n\n\ndef findOptimalTime(c, f, x):\n currentRate = float(2.0)\n timeSpent = float(0.0)\n while True:\n withFactory = c / currentRate + x / (currentRate + f)\n withoutFactory = x / currentRate\n if withFactory < withoutFactory:\n timeSpent += c / currentRate\n currentRate += f\n else:\n timeSpent += x / currentRate\n break\n return timeSpent\n\n\nwhile testCases > 0:\n inputs = raw_input().split(' ')\n c = float(inputs[0])\n f = float(inputs[1])\n x = float(inputs[2])\n bestTime = findOptimalTime(c, f, x)\n outputString = 'Case #{}: {}\\n'.format(kase, bestTime)\n sys.stdout.write(outputString)\n testCases -= 1\n kase += 1\n",
"<import token>\ntestCases = int(raw_input())\nkase = 1\n\n\ndef findOptimalTime(c, f, x):\n currentRate = float(2.0)\n timeSpent = float(0.0)\n while True:\n withFactory = c / currentRate + x / (currentRate + f)\n withoutFactory = x / currentRate\n if withFactory < withoutFactory:\n timeSpent += c / currentRate\n currentRate += f\n else:\n timeSpent += x / currentRate\n break\n return timeSpent\n\n\nwhile testCases > 0:\n inputs = raw_input().split(' ')\n c = float(inputs[0])\n f = float(inputs[1])\n x = float(inputs[2])\n bestTime = findOptimalTime(c, f, x)\n outputString = 'Case #{}: {}\\n'.format(kase, bestTime)\n sys.stdout.write(outputString)\n testCases -= 1\n kase += 1\n",
"<import token>\n<assignment token>\n\n\ndef findOptimalTime(c, f, x):\n currentRate = float(2.0)\n timeSpent = float(0.0)\n while True:\n withFactory = c / currentRate + x / (currentRate + f)\n withoutFactory = x / currentRate\n if withFactory < withoutFactory:\n timeSpent += c / currentRate\n currentRate += f\n else:\n timeSpent += x / currentRate\n break\n return timeSpent\n\n\nwhile testCases > 0:\n inputs = raw_input().split(' ')\n c = float(inputs[0])\n f = float(inputs[1])\n x = float(inputs[2])\n bestTime = findOptimalTime(c, f, x)\n outputString = 'Case #{}: {}\\n'.format(kase, bestTime)\n sys.stdout.write(outputString)\n testCases -= 1\n kase += 1\n",
"<import token>\n<assignment token>\n\n\ndef findOptimalTime(c, f, x):\n currentRate = float(2.0)\n timeSpent = float(0.0)\n while True:\n withFactory = c / currentRate + x / (currentRate + f)\n withoutFactory = x / currentRate\n if withFactory < withoutFactory:\n timeSpent += c / currentRate\n currentRate += f\n else:\n timeSpent += x / currentRate\n break\n return timeSpent\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<code token>\n"
] | false |
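The loop in findOptimalTime above encodes a greedy rule: with current rate R, buy the next farm only while C/R + X/(R+F) < X/R. A hand-checkable case (inputs chosen purely for illustration): with C=30, F=1, X=2 and R=2, buying costs 30/2 + 2/3 ≈ 15.67 s against 2/2 = 1 s without, so the optimum is 1.0.

# Python 3 restatement of findOptimalTime, run on the hand-checked case above.
def find_optimal_time(c, f, x):
    rate, spent = 2.0, 0.0
    while c / rate + x / (rate + f) < x / rate:  # buying still pays off
        spent += c / rate  # pay for the farm...
        rate += f          # ...and enjoy the faster rate
    return spent + x / rate

print(find_optimal_time(30.0, 1.0, 2.0))  # 1.0: never worth buying a farm here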
98,464 |
e6718818a8d06c7c9955ae2837314c8676d2ea2a
|
#!/usr/bin/python
import sensor_msgs.msg
import roslib; roslib.load_manifest("capsen_vision")
from capsen_vision.srv import *
import rospy
from getpc.srv import *
import sys
import time
import tf
import tf.transformations as tfm
from ik.helper import get_obj_capsentf
from ik.roshelper import pubFrame
import numpy as np
import geometry_msgs.msg
from ik.helper import Timer
from ik.roshelper import pose2list
from ik.roshelper import poselist2pose
from ik.helper import graspGripper
from ik.helper import pause
from ik.helper import get_bin_cnstr
from ik.helper import matrix_from_xyzquat
from ik.roshelper import poseTransform
from ik.roshelper import lookupTransform
from ik.helper import transformBack
from capsen_vision.msg import ObjectConstraint
import pdb
import traceback
import copy
import random
from visualization_msgs.msg import *
from geometry_msgs.msg import *
from std_msgs.msg import *
import json
toHack = True
haveDelay = False
def get_filtered_pointcloud(obj_ids, bin_num, kinect_num):
global _pointcloud2_service_srv
with Timer('pointcloud2_service'):
service_name = '/getpc_%d/getpc/get_filtered_pointcloud2_service' % kinect_num
req = GetObjectPointCloud2Request()
req.bin_num = bin_num
req.obj_id = obj_ids[0] # peterkty: need to pass in a list
print '\tWaiting for service up: ', service_name
rospy.wait_for_service(service_name)
try:
print '\tCalling service:', service_name
response = _pointcloud2_service_srv[kinect_num-1](req)
return response.pc2, response.foreground_mask
except:
print '\tCalling service:', service_name, 'failed'
print '\tencounters errors:', traceback.format_exc()
print '\tDid you call capsen.capsen.init()? Is camera connection good?'
return None, None
bin_cnstr = get_bin_cnstr()
def inside_bin(point, bin_num):
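    # point is an [x, y, z] position expressed in the shelf frame; returns True when it
    # lies inside bin bin_num, subject to the hand-tuned per-face margins below.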
cnstr = bin_cnstr[bin_num]
# return (
# x > cnstr[0]+0.02 && x < cnstr[1]-0.02 &&
# y > cnstr[2]+0.1 && y < cnstr[3]-0.01 &&
# z > cnstr[4]+0.00 && z < cnstr[5]-0.02/* && z < cnstr[4]+obj_max_height*/);
if point[0] > cnstr[0]+0.015 and point[0] < cnstr[1]-0.015 and \
point[1] > cnstr[2]+0.1 and point[1] < cnstr[3]-0.01 and \
point[2] > cnstr[4]-0.02 and point[2] < cnstr[5]-0.02:
return True
    # TODO: move these margin numbers out of the Python code
return False
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def _detectOneObject(obj_id, bin_num, kinect_num):
global _detect_one_object_srv
global br
print 'In', bcolors.WARNING, '_detectOneObject', bcolors.ENDC, 'obj_ids:', obj_id, 'bin_num:', bin_num
# filter the point cloud
pc, foreground_mask = get_filtered_pointcloud([obj_id], bin_num, kinect_num)
if pc is None:
return (None, None)
# string[] model_names
# sensor_msgs/PointCloud2 cloud # Note: this must be an organized point cloud (e.g., 640x480)
# bool[] foreground_mask
# bool find_exact_object_list # Set to true if you only want to consider scene hypotheses that contain exactly the objects in 'model_names'
# ObjectConstraint[] constraints # These apply to all objects
# ---
# SceneHypothesis[] detections
# 2. prepare constraints
    bin_cnstr = get_bin_cnstr()[bin_num] # [right, left, back, front, bottom, top] bounds of the bin
ccnstr = []
tol = 0.9 # larger is more strict
(trans,rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)
# 2.1 right
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([1,0,0], [bin_cnstr[0],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.2 left
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([-1,0,0], [bin_cnstr[1],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.3 back
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,1,0], [0,bin_cnstr[2],0], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.4 front
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,-1,0], [0,bin_cnstr[3],0], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.5 floor
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,1], [0,0,bin_cnstr[4]], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.6 top
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,-1], [0,0,bin_cnstr[5]], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.7 on floor
floor_thick = 0.03
ccnstr.append( createCapsenConstraint(ObjectConstraint.SUPPORTING_PLANE, transformPlane([0,0,1], [0,0, bin_cnstr[4]-floor_thick/2], trans,rot, pc.header.frame_id), tol, bin_num) )
# string model_name
# sensor_msgs/PointCloud2 cloud # Note: this must be an organized point cloud (e.g., 640x480)
# bool[] foreground_mask
# ObjectConstraint[] constraints
# geometry_msgs/Pose true_pose # for testing
# ---
#
# ObjectHypothesis[] detections
with Timer('detect_one_object'):
# detect using capsen
service_name = '/detection_service/detect_one_object'
req = DetectOneObjectRequest()
req.model_name = obj_id
req.cloud = pc
req.constraints = ccnstr
req.foreground_mask = foreground_mask
#req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)]
print 'Waiting for service up: ', service_name
rospy.wait_for_service(service_name)
try:
print 'Calling service:', service_name
ret = _detect_one_object_srv(req)
# ret.detections is a list of capsen_vision/ObjectHypothesis
# string name
# geometry_msgs/Pose pose
# float32 score
# float32[] score_components
if len(ret.detections)>0:
print len(ret.detections), 'ObjectHypothesis returned, max score', ret.detections[0].score
for i in range(len(ret.detections)):
poselist_capsen_world = poseTransform(pose2list(ret.detections[i].pose), pc.header.frame_id, 'map', _tflistener)
cap_T_our = get_obj_capsentf(obj_id) # x,y,z,qx,qy,qz,qw
poselist_world = transformBack(cap_T_our, poselist_capsen_world) # transform to our desired pose
# check whether inside bin
poselist_shelf = poseTransform(poselist_world, 'map', 'shelf', _tflistener)
if inside_bin(poselist_shelf[0:3], bin_num):
#pubFrame(br, poselist_world, 'obj', 'map')
return (poselist_world, ret.detections[i].score)
else:
print 'reject hypo', i, 'because it is outside the target bin'
print 'No ObjectHypothesis satisfy hard bin constraint'
return (None, None)
else:
print 'No ObjectHypothesis returned'
return (None, None)
except:
print 'Calling service:', service_name, 'failed'
print 'encounters errors:', traceback.format_exc()
return (None, None)
# mode = 0: use 2 kinects only
# mode = 1: use realsense only
def detectOneObjectWithoutBinContents(target_obj_id, bin_num, mode = 0):
nretry = 3
maxPose = None
maxScore = -100 # some very small number
if mode == 0:
for j in range(1,3): # loop over 2 cams
for i in range(nretry):
retPose, retScore = _detectOneObject(target_obj_id, bin_num, j)
if retPose is not None and retScore > maxScore:
maxPose = retPose; maxScore = retScore;
pubFrame(br, maxPose, 'obj_final', 'map')
elif mode == 1: # realsense
for i in range(nretry*2):
retPose, retScore = _detectOneObject(target_obj_id, bin_num, 3)
if retPose is not None and retScore > maxScore:
maxPose = retPose; maxScore = retScore;
pubFrame(br, maxPose, 'obj_final', 'map')
else:
print 'Mode incorrect!'
return None
return maxPose
def createCapsenConstraint(cnstr_type, params, tolerance, bin_num):
    constraint = ObjectConstraint()
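    # NOTE: tolerance and bin_num are accepted by every call site but are not used here;
    # only the constraint type and plane parameters are forwarded to CapSen.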
constraint.type = cnstr_type
constraint.params = params
return constraint
def transformPlane(params, pt, trans, rot, target_frame_id):
# params = [a,b,c,d] -> ax+by+cz>=d, we ignore input d
# pt is a point on the plane [x,y,z] (a list)
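    # Worked example (hypothetical numbers): with rot = identity and trans = [0.1, 0, 0],
    # the source-frame plane x >= 0.3 (params [1,0,0,*], pt [0.3,0,0]) keeps normal [1,0,0]
    # and gets d_target = dot([1,0,0], [0.4,0,0]) = 0.4, i.e. x >= 0.4 in the target frame.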
global br
rotMat = tfm.quaternion_matrix(rot)
target_T_source = matrix_from_xyzquat(trans, rot)
normal = params[0:3]
normal_target = np.dot(rotMat, np.array(normal + [0])) [0:3]
pt_target = np.dot(target_T_source, np.array(pt + [1])) [0:3]
d_target = np.dot(normal_target, pt_target)
ret = normal_target.tolist() + [d_target]
#pubFrame(br, pt_target.tolist() + [0,0,0,1], 'bound:%f' % d_target, target_frame_id)
#print ret
#pause()
return ret
def transformObjectsFromCapsenToDesiredFrame(scene, scene_frame_id):
global _tflistener
global br
newscene = copy.deepcopy(scene)
for i in range(len(scene.objects)):
poselist_capsen_world = poseTransform(pose2list(scene.objects[i].pose), scene_frame_id, 'map', _tflistener)
cap_T_our = get_obj_capsentf(scene.objects[i].name) # x,y,z,qx,qy,qz,qw
poselist_world = transformBack(cap_T_our, poselist_capsen_world)
newscene.objects[i].pose = poselist2pose(poselist_world)
#pubFrame(br, poselist_world, 'obj_%s' % scene.objects[i].name, 'map')
return newscene
def allObjectsInsideBin(scene, bin_num):
global _tflistener
for i in range(len(scene.objects)):
poselist_shelf = poseTransform(pose2list(scene.objects[i].pose), 'map', 'shelf', _tflistener)
if not inside_bin(poselist_shelf[0:3], bin_num):
return False
return True
def visualizeConstraint(cnstr, frame_id):
params = cnstr.params
#constraint.type = cnstr_type
pts = []
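    # Brute-force visualization: sample a 100x100x100 grid (1e6 candidate points) and keep
    # those satisfying a*x + b*y + c*z >= d; slow, intended only for one-off debugging.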
for x in np.linspace(-1, 1, num=100):
for y in np.linspace(-1, 1, num=100):
for z in np.linspace(0, 1, num=100):
val = params[0]*x + params[1]*y + params[2]*z
if val >= params[3]:
pts.append([x,y,z])
showPointMarker(pts, frame_id)
def showPointMarker(points, frame_id, offset=(0,0,0), orientation=(0,0,0,1)):
vis_pub = rospy.Publisher('visualization_marker', Marker, queue_size=10)
rospy.sleep(0.1)
marker = Marker()
marker.header.frame_id = frame_id
marker.type = marker.POINTS
marker.scale.x = 0.003
marker.scale.y = 0.003
marker.scale.z = 0.003
n = len(points)//3
for pt in points:
p = Point()
p.x = pt[0]
p.y = pt[1]
p.z = pt[2]
marker.points.append(p)
p = ColorRGBA()
p.r = 0
p.g = 0
p.b = 1
p.a = 1
marker.colors.append(p)
marker.pose.orientation.x = orientation[0]
marker.pose.orientation.y = orientation[1]
marker.pose.orientation.z = orientation[2]
marker.pose.orientation.w = orientation[3]
marker.pose.position.x = offset[0]
marker.pose.position.y = offset[1]
marker.pose.position.z = offset[2]
vis_pub.publish(marker)
rospy.sleep(0.1)
def allFalse(foreground_mask, cnt):
thres = 200
print '\tallFalse(): %d valid points, thres = %d' % (cnt, thres)
    return cnt < thres
def subsample(foreground_mask, cnt):
target_cnt = 5000
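    # Keep each foreground pixel independently with probability target_cnt/cnt, so the
    # expected number of surviving points is ~target_cnt (e.g. cnt=20000 gives p=0.25).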
if cnt > target_cnt:
ratio = float(target_cnt) / cnt
for i in range(len(foreground_mask)):
foreground_mask[i] = foreground_mask[i] and random.uniform(0,1) < ratio
print '\tsubsample(): After subsample: %d valid points' % sum(foreground_mask)
return foreground_mask
def _detectObjects(obj_ids, bin_num, kinect_num):
# return pose, retScore
global _detect_objects_srv
global br
global _tflistener
print 'In', '_detectObjects', 'obj_ids:', obj_ids, 'bin_num:', bin_num
# 1. filter the point cloud
pc, foreground_mask = get_filtered_pointcloud(obj_ids, bin_num, kinect_num) # need to pass in list
if pc is None or foreground_mask is None:
return (None, None)
# 2. prepare constraints
    bin_cnstr = get_bin_cnstr()[bin_num] # [right, left, back, front, bottom, top] bounds of the bin
ccnstr = []
tol = 0.9 # larger is more strict
(trans,rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)
# 2.1 right
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([1,0,0], [bin_cnstr[0],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.2 left
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([-1,0,0], [bin_cnstr[1],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.3 back
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,1,0], [0,bin_cnstr[2],0], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.4 front
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,-1,0], [0,bin_cnstr[3],0], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.5 floor
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,1], [0,0,bin_cnstr[4]], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.6 top
ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,-1], [0,0,bin_cnstr[5]], trans,rot, pc.header.frame_id), tol, bin_num) )
# 2.7 on floor
floor_thick = 0.03
ccnstr.append( createCapsenConstraint(ObjectConstraint.SUPPORTING_PLANE, transformPlane([0,0,1], [0,0, bin_cnstr[4]+floor_thick*2], trans,rot, pc.header.frame_id), tol, bin_num) )
#visualizeConstraint(ccnstr[6], pc.header.frame_id)
#pause()
# 3. detect using capsen
with Timer('detect_objects'):
service_name = '/detection_service/detect_objects'
req = DetectObjectsRequest()
req.model_names = obj_ids
req.constraints = ccnstr
req.cloud = pc
req.foreground_mask = foreground_mask
sum_pt = sum(foreground_mask)
if allFalse(foreground_mask, sum_pt):
return (None, None)
foreground_mask = subsample(foreground_mask, sum_pt)
# outputfile = '/tmp/foreground_mask'
# with open(outputfile, 'w') as outfile:
# json.dump(foreground_mask, outfile)
# pause()
#req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)] # hack
req.find_exact_object_list = True
print '\tWaiting for service up: ', service_name
rospy.wait_for_service(service_name)
#pdb.set_trace()
try:
print '\tCalling service:', service_name
ret = _detect_objects_srv(req)
# ret.detections is a list of capsen_vision/SceneHypothesis
# [capsen_vision/SceneHypothesis]:
# std_msgs/Header header
# uint32 seq
# time stamp
# string frame_id
# capsen_vision/ObjectHypothesis[] objects
# string name
# geometry_msgs/Pose pose
# geometry_msgs/Point position
# float64 x
# float64 y
# float64 z
# geometry_msgs/Quaternion orientation
# float64 x
# float64 y
# float64 z
# float64 w
# float32 score
# float32[] score_components
# float32[2] errors
# float32 score
if len(ret.detections)>0:
print '\t', len(ret.detections), 'SceneHypothesis returned, max score', ret.detections[0].score
#print ret.detections
for i in range(len(ret.detections)):
scene = ret.detections[i]
nobj = len(scene.objects)
scene_desired = transformObjectsFromCapsenToDesiredFrame(scene, pc.header.frame_id)
if allObjectsInsideBin(scene_desired, bin_num):
return (scene_desired.objects, scene_desired.score)
#else:
#print 'reject scene hypo', i, 'because one object of it is outside the target bin'
print '\tNo SceneHypothesis satisfy hard bin constraint'
return (None, None)
else:
print '\tNo SceneHypothesis returned'
return (None, None)
except:
print '\tCalling service:', service_name, 'failed'
print '\tencounters errors:', traceback.format_exc()
return (None, None)
def findTargetInd(Objects, target_obj_id):
for i, obj in enumerate(Objects):
if obj.name == target_obj_id:
return i
return None
def detectObjects(target_obj_id, obj_ids, bin_num, mode = 0):
nretry = 3
maxObjects = None
maxScore = -100 # some very small number
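    # Retry-and-keep-best: query each camera nretry times and keep the highest-scoring
    # scene hypothesis that actually contains the target object.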
if mode == 0:
for j in range(1,3): # loop over 2 cams
for i in range(nretry):
retObjects, retScore = _detectObjects(obj_ids, bin_num, j)
if retObjects is not None and retScore > maxScore:
target_ind = findTargetInd(retObjects, target_obj_id)
if target_ind is not None:
maxObjects = retObjects; maxScore = retScore;
maxPose = pose2list(retObjects[target_ind].pose)
pubFrame(br, maxPose, 'obj_final', 'map')
elif mode == 1: # realsense
for i in range(nretry):
retObjects, retScore = _detectObjects(obj_ids, bin_num, 3)
if retObjects is not None and retScore > maxScore:
target_ind = findTargetInd(retObjects, target_obj_id)
if target_ind is not None:
maxObjects = retObjects; maxScore = retScore;
maxPose = pose2list(retObjects[target_ind].pose)
pubFrame(br, maxPose, 'obj_final', 'map')
else:
print 'Mode incorrect!'
return (None, None)
return (maxObjects, maxScore)
def randomPoseScore(bin_num, withScore):
typical_poses = \
[[1.58220350742, 0.287826299667, 1.12025654316, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],
[1.58204042912, -0.0443051755428, 1.12202310562, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],
[1.58190357685, -0.323061853647, 1.12350583076, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],
[1.58220350742, 0.287826299667, 0.901469767094, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],
[1.58204042912, -0.0443051755428, 0.9014697670942, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],
[1.58190357685, -0.323061853647, 0.901469767094, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],
[1.58220350742, 0.287826299667, 0.658816933632, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],
[1.58204042912, -0.0443051755428, 0.658816933632, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],
[1.58190357685, -0.323061853647, 0.658816933632, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],
[1.58220350742, 0.287826299667, 0.434227764606, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],
[1.58204042912, -0.0443051755428, 0.434227764606, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],
[1.58190357685, -0.323061853647, 0.434227764606, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023]]
obj_pose = typical_poses[bin_num]
obj_pose[0] += random.uniform(-0.1, 0.1)
obj_pose[1] += random.uniform(-0.1, 0.1)
obj_pose[2] += random.uniform(-0.1, 0.1)
obj_pose[3:7] = tfm.random_quaternion(rand=None).tolist()
thres = 0
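    # With thres = 0 the uniform draw always succeeds, so this hack path always returns a
    # pose; raising thres would simulate intermittent detection failures.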
if withScore:
if random.uniform(0,1) >= thres:
return (obj_pose, random.uniform(0.1, 3))
else:
return None, None
else:
if random.uniform(0,1) >= thres:
return obj_pose
else:
return None
def detectOneObject(target_obj_id, obj_ids, bin_num, mode = 0, withScore = False):
# hack
if toHack:
# may want to have delay
if haveDelay:
nretry = 4
timeforcapsen = 3.0
time_ = nretry*timeforcapsen
if mode == 0:
time_ *= 2
rospy.sleep(time_)
print '[detectOneObject] simulated computation time %.2f sec' % time_
return randomPoseScore(bin_num, withScore)
retObjects, retScore = detectObjects(target_obj_id, obj_ids, bin_num, mode)
# find the target object
if retObjects is not None:
for obj in retObjects:
if obj.name == target_obj_id:
if withScore:
return (pose2list(obj.pose), retScore)
else:
return pose2list(obj.pose)
if withScore:
return (None, None)
else:
return None
initialized = False
def init():
global _pointcloud2_service_srv
global _detect_one_object_srv
global _detect_objects_srv
global _tflistener
global br
global initialized
if initialized: # already done
return
else:
initialized = True
_tflistener = tf.TransformListener()
br = tf.TransformBroadcaster() # for visualizing the detected frame
_pointcloud2_service_srv = []
kinect_num = 1
service_name = '/getpc_%d/getpc/get_filtered_pointcloud2_service' % kinect_num
_pointcloud2_service_srv.append(rospy.ServiceProxy(service_name, GetObjectPointCloud2))
kinect_num = 2
service_name = '/getpc_%d/getpc/get_filtered_pointcloud2_service' % kinect_num
_pointcloud2_service_srv.append(rospy.ServiceProxy(service_name, GetObjectPointCloud2))
kinect_num = 3
service_name = '/getpc_%d/getpc/get_filtered_pointcloud2_service' % kinect_num
_pointcloud2_service_srv.append(rospy.ServiceProxy(service_name, GetObjectPointCloud2))
_detect_one_object_srv= rospy.ServiceProxy('/detection_service/detect_one_object', DetectOneObject)
_detect_objects_srv= rospy.ServiceProxy('/detection_service/detect_objects', DetectObjects)
rospy.sleep(0.5)
def main(argv=None):
global br
if argv is None:
argv = sys.argv
rospy.init_node('capsen_test', anonymous=True)
rospy.sleep(0.1)
init()
rospy.sleep(0.1)
#retObjects, retScore = detectObjects('expo_dry_erase_board_eraser', ['expo_dry_erase_board_eraser', 'elmers_washable_no_run_school_glue'], bin_num = 0, mode = 0)
# pose = pose2list(retObjects[0].pose)
# pubFrame(br, pose, 'obj', 'map')
# print 'Objects', retObjects
# pause()
obj_list = ['mommys_helper_outlet_plugs',
'kong_duck_dog_toy',
'first_years_take_and_toss_straw_cup',
'champion_copper_plus_spark_plug',
'mead_index_cards',
'laugh_out_loud_joke_book',
'highland_6539_self_stick_notes',
'elmers_washable_no_run_school_glue',
'stanley_66_052',
'genuine_joe_plastic_stir_sticks',
'safety_works_safety_glasses',
'munchkin_white_hot_duck_bath_toy'
# 'crayola_64_ct',
# 'dr_browns_bottle_brush',
# 'kyjen_squeakin_eggs_plush_puppies',
# 'expo_dry_erase_board_eraser',
# 'cheezit_big_original',
# 'kong_air_dog_squeakair_tennis_ball',
# 'safety_works_safety_glasses',
# 'genuine_joe_plastic_stir_sticks'
]
bin_contents_all = [
[ "mommys_helper_outlet_plugs", "mark_twain_huckleberry_finn" ],
[ "feline_greenies_dental_treats", "kong_duck_dog_toy" ],
[ "first_years_take_and_toss_straw_cup","kong_sitting_frog_dog_toy" ],
[ "paper_mate_12_count_mirado_black_warrior", "champion_copper_plus_spark_plug" ],
[ "mead_index_cards", "sharpie_accent_tank_style_highlighters" ],
[ "mommys_helper_outlet_plugs", "laugh_out_loud_joke_book" ],
[ "kyjen_squeakin_eggs_plush_puppies", "highland_6539_self_stick_notes" ],
[ "elmers_washable_no_run_school_glue", "champion_copper_plus_spark_plug" ],
[ "crayola_64_ct", "stanley_66_052" ],
[ "genuine_joe_plastic_stir_sticks", "expo_dry_erase_board_eraser" ],
[ "safety_works_safety_glasses" ],
[ "kong_air_dog_squeakair_tennis_ball", "munchkin_white_hot_duck_bath_toy" ]]
#for i, obj_id in enumerate(obj_list):
for i in range(5,12):
pose = detectOneObject(obj_list[i], bin_contents_all[i], i)
pubFrame(br, pose, 'obj_final', 'map')
print 'Pose', pose
pause()
if __name__ == "__main__":
sys.exit(main())
|
[
"#!/usr/bin/python\n\nimport sensor_msgs.msg\nimport roslib; roslib.load_manifest(\"capsen_vision\")\nfrom capsen_vision.srv import *\nimport rospy\nfrom getpc.srv import *\nimport sys\nimport time\nimport tf\nimport tf.transformations as tfm\nfrom ik.helper import get_obj_capsentf\nfrom ik.roshelper import pubFrame\nimport numpy as np\nimport geometry_msgs.msg\nfrom ik.helper import Timer\nfrom ik.roshelper import pose2list\nfrom ik.roshelper import poselist2pose\nfrom ik.helper import graspGripper\nfrom ik.helper import pause\nfrom ik.helper import get_bin_cnstr\nfrom ik.helper import matrix_from_xyzquat\nfrom ik.helper import matrix_from_xyzquat\nfrom ik.roshelper import poseTransform\nfrom ik.roshelper import lookupTransform\nfrom ik.helper import transformBack\nfrom capsen_vision.msg import ObjectConstraint\nimport pdb\nimport traceback\nimport copy\nimport random\nfrom visualization_msgs.msg import *\nfrom geometry_msgs.msg import *\nfrom std_msgs.msg import *\nimport json\n\n\ntoHack = True\nhaveDelay = False\n\ndef get_filtered_pointcloud(obj_ids, bin_num, kinect_num):\n global _pointcloud2_service_srv\n with Timer('pointcloud2_service'):\n service_name = '/getpc_%d/getpc/get_filtered_pointcloud2_service' % kinect_num\n req = GetObjectPointCloud2Request()\n req.bin_num = bin_num\n req.obj_id = obj_ids[0] # peterkty: need to pass in a list\n print '\\tWaiting for service up: ', service_name\n rospy.wait_for_service(service_name)\n try:\n print '\\tCalling service:', service_name\n response = _pointcloud2_service_srv[kinect_num-1](req)\n return response.pc2, response.foreground_mask\n except:\n print '\\tCalling service:', service_name, 'failed'\n print '\\tencounters errors:', traceback.format_exc()\n print '\\tDid you call capsen.capsen.init()? Is camera connection good?'\n return None, None\n\nbin_cnstr = get_bin_cnstr()\n\ndef inside_bin(point, bin_num):\n cnstr = bin_cnstr[bin_num]\n \n # return (\n # x > cnstr[0]+0.02 && x < cnstr[1]-0.02 &&\n # y > cnstr[2]+0.1 && y < cnstr[3]-0.01 &&\n # z > cnstr[4]+0.00 && z < cnstr[5]-0.02/* && z < cnstr[4]+obj_max_height*/);\n \n if point[0] > cnstr[0]+0.015 and point[0] < cnstr[1]-0.015 and \\\n point[1] > cnstr[2]+0.1 and point[1] < cnstr[3]-0.01 and \\\n point[2] > cnstr[4]-0.02 and point[2] < cnstr[5]-0.02:\n return True\n \n # #todo: make the numbers out of python code\n return False\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\ndef _detectOneObject(obj_id, bin_num, kinect_num):\n global _detect_one_object_srv\n global br\n \n print 'In', bcolors.WARNING, '_detectOneObject', bcolors.ENDC, 'obj_ids:', obj_id, 'bin_num:', bin_num\n # filter the point cloud\n pc, foreground_mask = get_filtered_pointcloud([obj_id], bin_num, kinect_num)\n if pc is None:\n return (None, None)\n \n # string[] model_names\n # sensor_msgs/PointCloud2 cloud # Note: this must be an organized point cloud (e.g., 640x480)\n # bool[] foreground_mask\n # bool find_exact_object_list # Set to true if you only want to consider scene hypotheses that contain exactly the objects in 'model_names'\n # ObjectConstraint[] constraints # These apply to all objects\n # ---\n # SceneHypothesis[] detections\n \n \n # 2. 
prepare constraints\n \n bin_cnstr = get_bin_cnstr()[bin_num] # a list of right \\ # left \\ # back \\ # front \\ # bottom \\ # top\n ccnstr = []\n \n tol = 0.9 # larger is more strict\n (trans,rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)\n # 2.1 right\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([1,0,0], [bin_cnstr[0],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.2 left\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([-1,0,0], [bin_cnstr[1],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.3 back\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,1,0], [0,bin_cnstr[2],0], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.4 front\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,-1,0], [0,bin_cnstr[3],0], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.5 floor\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,1], [0,0,bin_cnstr[4]], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.6 top\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,-1], [0,0,bin_cnstr[5]], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.7 on floor\n floor_thick = 0.03\n ccnstr.append( createCapsenConstraint(ObjectConstraint.SUPPORTING_PLANE, transformPlane([0,0,1], [0,0, bin_cnstr[4]-floor_thick/2], trans,rot, pc.header.frame_id), tol, bin_num) )\n # string model_name\n # sensor_msgs/PointCloud2 cloud # Note: this must be an organized point cloud (e.g., 640x480)\n # bool[] foreground_mask\n # ObjectConstraint[] constraints\n # geometry_msgs/Pose true_pose # for testing\n # ---\n # \n # ObjectHypothesis[] detections\n\n with Timer('detect_one_object'):\n # detect using capsen\n service_name = '/detection_service/detect_one_object'\n req = DetectOneObjectRequest()\n req.model_name = obj_id\n req.cloud = pc\n req.constraints = ccnstr\n req.foreground_mask = foreground_mask\n #req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)]\n \n print 'Waiting for service up: ', service_name\n rospy.wait_for_service(service_name)\n try:\n print 'Calling service:', service_name\n ret = _detect_one_object_srv(req)\n # ret.detections is a list of capsen_vision/ObjectHypothesis\n # string name\n # geometry_msgs/Pose pose\n # float32 score\n # float32[] score_components\n if len(ret.detections)>0:\n print len(ret.detections), 'ObjectHypothesis returned, max score', ret.detections[0].score\n for i in range(len(ret.detections)):\n poselist_capsen_world = poseTransform(pose2list(ret.detections[i].pose), pc.header.frame_id, 'map', _tflistener)\n \n cap_T_our = get_obj_capsentf(obj_id) # x,y,z,qx,qy,qz,qw\n poselist_world = transformBack(cap_T_our, poselist_capsen_world) # transform to our desired pose\n \n # check whether inside bin\n poselist_shelf = poseTransform(poselist_world, 'map', 'shelf', _tflistener)\n if inside_bin(poselist_shelf[0:3], bin_num):\n #pubFrame(br, poselist_world, 'obj', 'map')\n return (poselist_world, ret.detections[i].score)\n else:\n print 'reject hypo', i, 'because it is outside the target bin'\n print 'No ObjectHypothesis satisfy hard bin constraint'\n return (None, None)\n else:\n print 'No ObjectHypothesis returned'\n return (None, None)\n except:\n print 'Calling service:', service_name, 'failed'\n print 'encounters errors:', traceback.format_exc()\n return (None, None)\n\n\n# mode = 0: use 2 kinects 
only\n# mode = 1: use realsense only\ndef detectOneObjectWithoutBinContents(target_obj_id, bin_num, mode = 0):\n nretry = 3\n maxPose = None\n maxScore = -100 # some very small number\n if mode == 0:\n for j in range(1,3): # loop over 2 cams\n for i in range(nretry):\n retPose, retScore = _detectOneObject(target_obj_id, bin_num, j)\n if retPose is not None and retScore > maxScore:\n maxPose = retPose; maxScore = retScore;\n pubFrame(br, maxPose, 'obj_final', 'map')\n \n\n elif mode == 1: # realsense\n for i in range(nretry*2):\n retPose, retScore = _detectOneObject(target_obj_id, bin_num, 3)\n if retPose is not None and retScore > maxScore:\n maxPose = retPose; maxScore = retScore;\n pubFrame(br, maxPose, 'obj_final', 'map')\n \n else:\n print 'Mode incorrect!'\n return None\n \n return maxPose\n\n\ndef createCapsenConstraint(cnstr_type, params, tolerance, bin_num):\n constraint = ObjectConstraint(); \n constraint.type = cnstr_type\n constraint.params = params\n return constraint\n\ndef transformPlane(params, pt, trans, rot, target_frame_id):\n # params = [a,b,c,d] -> ax+by+cz>=d, we ignore input d\n # pt is a point on the plane [x,y,z] (a list) \n global br\n \n rotMat = tfm.quaternion_matrix(rot)\n target_T_source = matrix_from_xyzquat(trans, rot)\n normal = params[0:3]\n normal_target = np.dot(rotMat, np.array(normal + [0])) [0:3]\n pt_target = np.dot(target_T_source, np.array(pt + [1])) [0:3]\n d_target = np.dot(normal_target, pt_target)\n ret = normal_target.tolist() + [d_target]\n #pubFrame(br, pt_target.tolist() + [0,0,0,1], 'bound:%f' % d_target, target_frame_id)\n #print ret\n #pause()\n return ret\n\ndef transformObjectsFromCapsenToDesiredFrame(scene, scene_frame_id):\n global _tflistener\n global br\n newscene = copy.deepcopy(scene)\n for i in range(len(scene.objects)):\n poselist_capsen_world = poseTransform(pose2list(scene.objects[i].pose), scene_frame_id, 'map', _tflistener)\n cap_T_our = get_obj_capsentf(scene.objects[i].name) # x,y,z,qx,qy,qz,qw\n poselist_world = transformBack(cap_T_our, poselist_capsen_world) \n newscene.objects[i].pose = poselist2pose(poselist_world)\n \n #pubFrame(br, poselist_world, 'obj_%s' % scene.objects[i].name, 'map')\n return newscene\n\ndef allObjectsInsideBin(scene, bin_num):\n global _tflistener\n for i in range(len(scene.objects)):\n poselist_shelf = poseTransform(pose2list(scene.objects[i].pose), 'map', 'shelf', _tflistener)\n if not inside_bin(poselist_shelf[0:3], bin_num):\n return False\n \n return True\n\ndef visualizeConstraint(cnstr, frame_id):\n params = cnstr.params\n #constraint.type = cnstr_type\n pts = []\n for x in np.linspace(-1, 1, num=100):\n for y in np.linspace(-1, 1, num=100):\n for z in np.linspace(0, 1, num=100):\n val = params[0]*x + params[1]*y + params[2]*z \n if val >= params[3]:\n pts.append([x,y,z])\n \n showPointMarker(pts, frame_id)\n \ndef showPointMarker(points, frame_id, offset=(0,0,0), orientation=(0,0,0,1)):\n vis_pub = rospy.Publisher('visualization_marker', Marker, queue_size=10)\n rospy.sleep(0.1)\n marker = Marker()\n marker.header.frame_id = frame_id\n marker.type = marker.POINTS\n marker.scale.x = 0.003\n marker.scale.y = 0.003\n marker.scale.z = 0.003\n \n n = len(points)//3\n for pt in points:\n p = Point()\n p.x = pt[0]\n p.y = pt[1]\n p.z = pt[2]\n marker.points.append(p)\n \n p = ColorRGBA()\n p.r = 0\n p.g = 0\n p.b = 1\n p.a = 1\n marker.colors.append(p)\n \n marker.pose.orientation.x = orientation[0]\n marker.pose.orientation.y = orientation[1]\n marker.pose.orientation.z = orientation[2]\n 
marker.pose.orientation.w = orientation[3]\n marker.pose.position.x = offset[0]\n marker.pose.position.y = offset[1]\n marker.pose.position.z = offset[2]\n \n vis_pub.publish(marker)\n rospy.sleep(0.1)\n \n \ndef allFalse(foreground_mask, cnt):\n thres = 200\n print '\\tallFalse(): %d valid points, thres = %d' % (cnt, thres)\n if cnt < thres:\n return True\n return False\n\ndef subsample(foreground_mask, cnt):\n target_cnt = 5000\n if cnt > target_cnt:\n ratio = float(target_cnt) / cnt\n for i in range(len(foreground_mask)):\n foreground_mask[i] = foreground_mask[i] and random.uniform(0,1) < ratio\n \n \n print '\\tsubsample(): After subsample: %d valid points' % sum(foreground_mask)\n return foreground_mask\n\ndef _detectObjects(obj_ids, bin_num, kinect_num):\n # return pose, retScore\n global _detect_objects_srv\n global br\n global _tflistener\n \n print 'In', '_detectObjects', 'obj_ids:', obj_ids, 'bin_num:', bin_num\n \n # 1. filter the point cloud\n pc, foreground_mask = get_filtered_pointcloud(obj_ids, bin_num, kinect_num) # need to pass in list\n if pc is None or foreground_mask is None:\n return (None, None)\n \n \n \n # 2. prepare constraints\n \n bin_cnstr = get_bin_cnstr()[bin_num] # a list of right \\ # left \\ # back \\ # front \\ # bottom \\ # top\n ccnstr = []\n \n tol = 0.9 # larger is more strict\n (trans,rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)\n # 2.1 right\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([1,0,0], [bin_cnstr[0],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.2 left\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([-1,0,0], [bin_cnstr[1],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.3 back\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,1,0], [0,bin_cnstr[2],0], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.4 front\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,-1,0], [0,bin_cnstr[3],0], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.5 floor\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,1], [0,0,bin_cnstr[4]], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.6 top\n ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,-1], [0,0,bin_cnstr[5]], trans,rot, pc.header.frame_id), tol, bin_num) )\n # 2.7 on floor\n floor_thick = 0.03\n ccnstr.append( createCapsenConstraint(ObjectConstraint.SUPPORTING_PLANE, transformPlane([0,0,1], [0,0, bin_cnstr[4]+floor_thick*2], trans,rot, pc.header.frame_id), tol, bin_num) )\n #visualizeConstraint(ccnstr[6], pc.header.frame_id)\n #pause()\n \n # 3. 
detect using capsen\n with Timer('detect_objects'):\n service_name = '/detection_service/detect_objects'\n req = DetectObjectsRequest()\n req.model_names = obj_ids\n req.constraints = ccnstr\n req.cloud = pc\n req.foreground_mask = foreground_mask\n \n sum_pt = sum(foreground_mask)\n if allFalse(foreground_mask, sum_pt):\n return (None, None)\n foreground_mask = subsample(foreground_mask, sum_pt)\n \n # outputfile = '/tmp/foreground_mask'\n # with open(outputfile, 'w') as outfile:\n # json.dump(foreground_mask, outfile)\n # pause()\n #req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)] # hack\n req.find_exact_object_list = True\n \n print '\\tWaiting for service up: ', service_name\n rospy.wait_for_service(service_name)\n \n #pdb.set_trace()\n try:\n print '\\tCalling service:', service_name\n ret = _detect_objects_srv(req)\n # ret.detections is a list of capsen_vision/SceneHypothesis\n # [capsen_vision/SceneHypothesis]:\n # std_msgs/Header header\n # uint32 seq\n # time stamp\n # string frame_id\n # capsen_vision/ObjectHypothesis[] objects\n # string name\n # geometry_msgs/Pose pose\n # geometry_msgs/Point position\n # float64 x\n # float64 y\n # float64 z\n # geometry_msgs/Quaternion orientation\n # float64 x\n # float64 y\n # float64 z\n # float64 w\n # float32 score\n # float32[] score_components\n # float32[2] errors\n # float32 score\n\n if len(ret.detections)>0:\n print '\\t', len(ret.detections), 'SceneHypothesis returned, max score', ret.detections[0].score\n #print ret.detections\n for i in range(len(ret.detections)):\n scene = ret.detections[i]\n nobj = len(scene.objects)\n \n scene_desired = transformObjectsFromCapsenToDesiredFrame(scene, pc.header.frame_id)\n if allObjectsInsideBin(scene_desired, bin_num):\n return (scene_desired.objects, scene_desired.score)\n #else:\n #print 'reject scene hypo', i, 'because one object of it is outside the target bin'\n print '\\tNo SceneHypothesis satisfy hard bin constraint'\n return (None, None)\n else:\n print '\\tNo SceneHypothesis returned'\n return (None, None)\n except:\n print '\\tCalling service:', service_name, 'failed'\n print '\\tencounters errors:', traceback.format_exc()\n return (None, None)\n\n\ndef findTargetInd(Objects, target_obj_id):\n for i, obj in enumerate(Objects):\n if obj.name == target_obj_id:\n return i\n return None\n\ndef detectObjects(target_obj_id, obj_ids, bin_num, mode = 0):\n nretry = 3\n maxObjects = None\n maxScore = -100 # some very small number\n if mode == 0:\n for j in range(1,3): # loop over 2 cams\n for i in range(nretry):\n retObjects, retScore = _detectObjects(obj_ids, bin_num, j)\n if retObjects is not None and retScore > maxScore:\n target_ind = findTargetInd(retObjects, target_obj_id)\n if target_ind is not None:\n maxObjects = retObjects; maxScore = retScore;\n maxPose = pose2list(retObjects[target_ind].pose)\n pubFrame(br, maxPose, 'obj_final', 'map')\n\n elif mode == 1: # realsense\n for i in range(nretry):\n retObjects, retScore = _detectObjects(obj_ids, bin_num, 3)\n if retObjects is not None and retScore > maxScore:\n target_ind = findTargetInd(retObjects, target_obj_id)\n if target_ind is not None:\n maxObjects = retObjects; maxScore = retScore;\n maxPose = pose2list(retObjects[target_ind].pose)\n pubFrame(br, maxPose, 'obj_final', 'map')\n \n else:\n print 'Mode incorrect!'\n return (None, None)\n \n return (maxObjects, maxScore)\n\ndef randomPoseScore(bin_num, withScore):\n typical_poses = \\\n [[1.58220350742, 0.287826299667, 1.12025654316, 
-0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],\n [1.58204042912, -0.0443051755428, 1.12202310562, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],\n [1.58190357685, -0.323061853647, 1.12350583076, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],\n [1.58220350742, 0.287826299667, 0.901469767094, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],\n [1.58204042912, -0.0443051755428, 0.9014697670942, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],\n [1.58190357685, -0.323061853647, 0.901469767094, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],\n [1.58220350742, 0.287826299667, 0.658816933632, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],\n [1.58204042912, -0.0443051755428, 0.658816933632, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],\n [1.58190357685, -0.323061853647, 0.658816933632, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],\n [1.58220350742, 0.287826299667, 0.434227764606, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],\n [1.58204042912, -0.0443051755428, 0.434227764606, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023],\n [1.58190357685, -0.323061853647, 0.434227764606, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023]]\n obj_pose = typical_poses[bin_num]\n obj_pose[0] += random.uniform(-0.1, 0.1)\n obj_pose[1] += random.uniform(-0.1, 0.1)\n obj_pose[2] += random.uniform(-0.1, 0.1)\n obj_pose[3:7] = tfm.random_quaternion(rand=None).tolist()\n thres = 0\n if withScore:\n if random.uniform(0,1) >= thres:\n return (obj_pose, random.uniform(0.1, 3))\n else:\n return None, None\n else:\n if random.uniform(0,1) >= thres:\n return obj_pose\n else:\n return None\n \ndef detectOneObject(target_obj_id, obj_ids, bin_num, mode = 0, withScore = False):\n # hack\n if toHack:\n # may want to have delay\n if haveDelay:\n nretry = 4\n timeforcapsen = 3.0\n time_ = nretry*timeforcapsen\n if mode == 0:\n time_ *= 2\n rospy.sleep(time_)\n print '[detectOneObject] simulated computation time %.2f sec' % time_\n return randomPoseScore(bin_num, withScore)\n \n retObjects, retScore = detectObjects(target_obj_id, obj_ids, bin_num, mode)\n # find the target object\n if retObjects is not None:\n for obj in retObjects:\n if obj.name == target_obj_id:\n if withScore:\n return (pose2list(obj.pose), retScore)\n else:\n return pose2list(obj.pose)\n \n if withScore:\n return (None, None)\n else:\n return None\n \ninitialized = False\n\ndef init():\n global _pointcloud2_service_srv\n global _detect_one_object_srv\n global _detect_objects_srv\n global _tflistener\n global br\n global initialized\n \n if initialized: # already done\n return\n else:\n initialized = True\n \n _tflistener = tf.TransformListener()\n br = tf.TransformBroadcaster() # for visualizing the detected frame\n \n _pointcloud2_service_srv = []\n \n kinect_num = 1\n service_name = '/getpc_%d/getpc/get_filtered_pointcloud2_service' % kinect_num\n _pointcloud2_service_srv.append(rospy.ServiceProxy(service_name, GetObjectPointCloud2))\n \n kinect_num = 2\n service_name = '/getpc_%d/getpc/get_filtered_pointcloud2_service' % kinect_num\n _pointcloud2_service_srv.append(rospy.ServiceProxy(service_name, GetObjectPointCloud2))\n \n kinect_num = 3\n service_name = '/getpc_%d/getpc/get_filtered_pointcloud2_service' % kinect_num\n _pointcloud2_service_srv.append(rospy.ServiceProxy(service_name, 
GetObjectPointCloud2))\n \n _detect_one_object_srv= rospy.ServiceProxy('/detection_service/detect_one_object', DetectOneObject)\n \n _detect_objects_srv= rospy.ServiceProxy('/detection_service/detect_objects', DetectObjects)\n \n rospy.sleep(0.5)\n \n\n\ndef main(argv=None):\n global br\n if argv is None:\n argv = sys.argv\n \n rospy.init_node('capsen_test', anonymous=True)\n rospy.sleep(0.1)\n init()\n rospy.sleep(0.1)\n \n \n #retObjects, retScore = detectObjects('expo_dry_erase_board_eraser', ['expo_dry_erase_board_eraser', 'elmers_washable_no_run_school_glue'], bin_num = 0, mode = 0)\n # retObjects, retScore = detectObjects('expo_dry_erase_board_eraser', ['expo_dry_erase_board_eraser', 'elmers_washable_no_run_school_glue'], bin_num = 0, mode = 0)\n # pose = pose2list(retObjects[0].pose)\n # pubFrame(br, pose, 'obj', 'map')\n # print 'Objects', retObjects\n # pause()\n \n obj_list = ['mommys_helper_outlet_plugs', \n 'kong_duck_dog_toy',\n 'first_years_take_and_toss_straw_cup',\n 'champion_copper_plus_spark_plug',\n 'mead_index_cards',\n 'laugh_out_loud_joke_book',\n 'highland_6539_self_stick_notes',\n 'elmers_washable_no_run_school_glue',\n 'stanley_66_052',\n 'genuine_joe_plastic_stir_sticks',\n 'safety_works_safety_glasses',\n 'munchkin_white_hot_duck_bath_toy'\n # 'crayola_64_ct',\n # 'dr_browns_bottle_brush',\n # 'kyjen_squeakin_eggs_plush_puppies',\n # 'expo_dry_erase_board_eraser',\n # 'cheezit_big_original',\n # 'kong_air_dog_squeakair_tennis_ball',\n # 'safety_works_safety_glasses',\n # 'genuine_joe_plastic_stir_sticks'\n ]\n \n\n bin_contents_all = [\n [ \"mommys_helper_outlet_plugs\", \"mark_twain_huckleberry_finn\" ],\n [ \"feline_greenies_dental_treats\", \"kong_duck_dog_toy\" ],\n [ \"first_years_take_and_toss_straw_cup\",\"kong_sitting_frog_dog_toy\" ],\n [ \"paper_mate_12_count_mirado_black_warrior\", \"champion_copper_plus_spark_plug\" ],\n [ \"mead_index_cards\", \"sharpie_accent_tank_style_highlighters\" ],\n [ \"mommys_helper_outlet_plugs\", \"laugh_out_loud_joke_book\" ],\n [ \"kyjen_squeakin_eggs_plush_puppies\", \"highland_6539_self_stick_notes\" ],\n [ \"elmers_washable_no_run_school_glue\", \"champion_copper_plus_spark_plug\" ],\n [ \"crayola_64_ct\", \"stanley_66_052\" ],\n [ \"genuine_joe_plastic_stir_sticks\", \"expo_dry_erase_board_eraser\" ],\n [ \"safety_works_safety_glasses\" ],\n [ \"kong_air_dog_squeakair_tennis_ball\", \"munchkin_white_hot_duck_bath_toy\" ]]\n \n #for i, obj_id in enumerate(obj_list):\n for i in range(5,12):\n pose = detectOneObject(obj_list[i], bin_contents_all[i], i)\n pubFrame(br, pose, 'obj_final', 'map')\n print 'Pose', pose\n pause()\n\n \n \nif __name__ == \"__main__\":\n sys.exit(main())\n\n\n\n"
] | true |
98,465 |
88ea507a9db65606417c83e20bd18020291f75d4
|
#!/usr/bin/env python
import os
from multiprocessing import Pool, Manager, Queue
from clang.cindex import Index, TranslationUnit, CursorKind
from crange import *
def dbkeeper(queue, opts):
tagdb = TagDB(opts.outputFile)
indexed_files = set()
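    # Drain ASTs pushed by the worker processes; a literal 0 on the queue is the sentinel
    # telling this process to build the database index and exit.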
while True:
ast = queue.get()
        if ast != 0:
for loc, nodes in ast.iteritems():
if loc not in indexed_files:
print "Indexing %s (nodes: %s, qsize: %s)" % (loc, len(nodes), queue.qsize() + 1)
indexed_files.add(loc)
tagdb.persist(nodes)
else:
tagdb.create_index()
break
def worker(worker_params):
source, count, queue, opts, args = worker_params
index = Index.create()
c = CrTags()
c.opts = opts
c.args = args
clang_line = [source, '-Iinclude']
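    # Parse the file as a libclang translation unit; '-Iinclude' assumes project headers
    # live under ./include relative to the working directory.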
try:
tu = index.parse(None, clang_line)
c.get_info(tu.cursor)
if len(c.ast) > 0:
queue.put(c.ast)
c.ast.clear()
c.debug("Parsing %s (count: %s)" % (source, count+1))
except Exception as e:
print "Error parsing %s: %s" % (source, e.message)
def spawn_workers(opts, args):
root = args[0]
sfo = SourceFile()
pool = Pool(opts.jobs)
manager = Manager()
queue = manager.Queue()
try:
# Fire dbkeeper process.
pool.apply_async(dbkeeper, (queue, opts))
# Then fire AST worker processes.
worker_params = ((s, count, queue, opts, args) for count,s in enumerate(sfo.locate(root)))
pool.map(worker, worker_params)
# Kill the dbkeeper loop.
queue.put(0)
# Close and reap the worker processes.
pool.close()
pool.join()
except KeyboardInterrupt:
print "Terminating workers"
pool.terminate()
pool.join()
if __name__ == '__main__':
parser = crtags_parser()
opts, args = parser.parse_args()
    if len(args) == 0:
        parser.error('Invalid number of arguments')
    root = args[0]
if not os.path.isdir(root):
parser.error("%s is not a directory" % root)
else:
spawn_workers(opts, args)
|
[
"#!/usr/bin/env python\nimport os\n\nfrom multiprocessing import Pool, Manager, Queue\nfrom clang.cindex import Index, TranslationUnit, CursorKind\nfrom crange import *\n\ndef dbkeeper(queue, opts):\n tagdb = TagDB(opts.outputFile)\n indexed_files = set()\n while True:\n ast = queue.get()\n if ast is not 0:\n for loc, nodes in ast.iteritems():\n if loc not in indexed_files:\n print \"Indexing %s (nodes: %s, qsize: %s)\" % (loc, len(nodes), queue.qsize() + 1)\n indexed_files.add(loc)\n tagdb.persist(nodes)\n else:\n tagdb.create_index()\n break\n\ndef worker(worker_params):\n source, count, queue, opts, args = worker_params\n index = Index.create()\n c = CrTags()\n c.opts = opts\n c.args = args\n clang_line = [source, '-Iinclude']\n try:\n tu = index.parse(None, clang_line)\n c.get_info(tu.cursor)\n if len(c.ast) > 0:\n queue.put(c.ast)\n c.ast.clear()\n c.debug(\"Parsing %s (count: %s)\" % (source, count+1))\n except Exception as e:\n print \"Error parsing %s: %s\" % (source, e.message)\n\ndef spawn_workers(opts, args):\n root = args[0]\n sfo = SourceFile()\n pool = Pool(opts.jobs)\n manager = Manager()\n queue = manager.Queue()\n\n try:\n # Fire dbkeeper process.\n pool.apply_async(dbkeeper, (queue, opts))\n\n # Then fire AST worker processes.\n worker_params = ((s, count, queue, opts, args) for count,s in enumerate(sfo.locate(root)))\n pool.map(worker, worker_params)\n\n # Kill the dbkeeper loop.\n queue.put(0)\n\n # Close and reap the worker processes.\n pool.close()\n pool.join()\n except KeyboardInterrupt:\n print \"Terminating workers\"\n pool.terminate()\n pool.join()\n\nif __name__ == '__main__':\n parser = crtags_parser()\n opts, args = parser.parse_args()\n root = args[0] \n\n if len(args) == 0:\n parser.error('Invalid number of arguments')\n\n if not os.path.isdir(root):\n parser.error(\"%s is not a directory\" % root)\n else:\n spawn_workers(opts, args)\n"
] | true |
98,466 |
09b6f25328e5bd7087c085f20b33bcbade6332ba
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
from pymatgen.analysis.pourbaix.maker import PourbaixDiagram
from pymatgen.analysis.pourbaix.entry import PourbaixEntryIO
try:
from pymatgen.analysis.pourbaix.plotter import PourbaixPlotter
from pymatgen.analysis.pourbaix.analyzer import PourbaixAnalyzer
except ImportError:
PourbaixAnalyzer = None
test_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', 'test_files')
@unittest.skipIf(PourbaixAnalyzer is None, "ImportError while importing PourbaixAnalyzer")
class TestPourbaixPlotter(unittest.TestCase):
def setUp(self):
module_dir = os.path.dirname(os.path.abspath(__file__))
(elements, entries) = PourbaixEntryIO.from_csv(os.path.join(module_dir,
"test_entries.csv"))
self.num_simplices = {"Zn(s)": 7, "ZnO2(s)": 7, "Zn[2+]": 4, "ZnO2[2-]": 4, "ZnHO2[-]": 4}
self.e_above_hull_test = {"ZnHO[+]": 0.0693, "ZnO(aq)": 0.0624}
self.decomp_test = {"ZnHO[+]": {"ZnO(s)": 0.5, "Zn[2+]": 0.5}, "ZnO(aq)": {"ZnO(s)": 1.0}}
self.pd = PourbaixDiagram(entries)
self.plotter = PourbaixPlotter(self.pd)
def test_plot_pourbaix(self):
plt = self.plotter.get_pourbaix_plot(limits=[[-2, 14], [-3, 3]])
def test_get_entry_stability(self):
entry = self.pd.all_entries[0]
plt = self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-3, 3]])
if __name__ == '__main__':
unittest.main()
|
[
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import unicode_literals\n\nimport unittest\nimport os\n\nfrom pymatgen.analysis.pourbaix.maker import PourbaixDiagram\nfrom pymatgen.analysis.pourbaix.entry import PourbaixEntryIO\n\ntry:\n from pymatgen.analysis.pourbaix.plotter import PourbaixPlotter\n from pymatgen.analysis.pourbaix.analyzer import PourbaixAnalyzer\nexcept ImportError:\n PourbaixAnalyzer = None\n\ntest_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', 'test_files')\n\[email protected](PourbaixAnalyzer is None, \"ImportError while importing PourbaixAnalyzer\")\nclass TestPourbaixPlotter(unittest.TestCase):\n\n def setUp(self):\n module_dir = os.path.dirname(os.path.abspath(__file__))\n (elements, entries) = PourbaixEntryIO.from_csv(os.path.join(module_dir,\n \"test_entries.csv\"))\n self.num_simplices = {\"Zn(s)\": 7, \"ZnO2(s)\": 7, \"Zn[2+]\": 4, \"ZnO2[2-]\": 4, \"ZnHO2[-]\": 4}\n self.e_above_hull_test = {\"ZnHO[+]\": 0.0693, \"ZnO(aq)\": 0.0624}\n self.decomp_test = {\"ZnHO[+]\": {\"ZnO(s)\": 0.5, \"Zn[2+]\": 0.5}, \"ZnO(aq)\": {\"ZnO(s)\": 1.0}}\n self.pd = PourbaixDiagram(entries)\n self.plotter = PourbaixPlotter(self.pd)\n\n def test_plot_pourbaix(self):\n plt = self.plotter.get_pourbaix_plot(limits=[[-2, 14], [-3, 3]])\n\n def test_get_entry_stability(self):\n entry = self.pd.all_entries[0]\n plt = self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-3, 3]])\n\nif __name__ == '__main__':\n unittest.main()\n",
"from __future__ import unicode_literals\nimport unittest\nimport os\nfrom pymatgen.analysis.pourbaix.maker import PourbaixDiagram\nfrom pymatgen.analysis.pourbaix.entry import PourbaixEntryIO\ntry:\n from pymatgen.analysis.pourbaix.plotter import PourbaixPlotter\n from pymatgen.analysis.pourbaix.analyzer import PourbaixAnalyzer\nexcept ImportError:\n PourbaixAnalyzer = None\ntest_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..',\n 'test_files')\n\n\[email protected](PourbaixAnalyzer is None,\n 'ImportError while importing PourbaixAnalyzer')\nclass TestPourbaixPlotter(unittest.TestCase):\n\n def setUp(self):\n module_dir = os.path.dirname(os.path.abspath(__file__))\n elements, entries = PourbaixEntryIO.from_csv(os.path.join(\n module_dir, 'test_entries.csv'))\n self.num_simplices = {'Zn(s)': 7, 'ZnO2(s)': 7, 'Zn[2+]': 4,\n 'ZnO2[2-]': 4, 'ZnHO2[-]': 4}\n self.e_above_hull_test = {'ZnHO[+]': 0.0693, 'ZnO(aq)': 0.0624}\n self.decomp_test = {'ZnHO[+]': {'ZnO(s)': 0.5, 'Zn[2+]': 0.5},\n 'ZnO(aq)': {'ZnO(s)': 1.0}}\n self.pd = PourbaixDiagram(entries)\n self.plotter = PourbaixPlotter(self.pd)\n\n def test_plot_pourbaix(self):\n plt = self.plotter.get_pourbaix_plot(limits=[[-2, 14], [-3, 3]])\n\n def test_get_entry_stability(self):\n entry = self.pd.all_entries[0]\n plt = self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-\n 3, 3]])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\ntry:\n from pymatgen.analysis.pourbaix.plotter import PourbaixPlotter\n from pymatgen.analysis.pourbaix.analyzer import PourbaixAnalyzer\nexcept ImportError:\n PourbaixAnalyzer = None\ntest_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..',\n 'test_files')\n\n\[email protected](PourbaixAnalyzer is None,\n 'ImportError while importing PourbaixAnalyzer')\nclass TestPourbaixPlotter(unittest.TestCase):\n\n def setUp(self):\n module_dir = os.path.dirname(os.path.abspath(__file__))\n elements, entries = PourbaixEntryIO.from_csv(os.path.join(\n module_dir, 'test_entries.csv'))\n self.num_simplices = {'Zn(s)': 7, 'ZnO2(s)': 7, 'Zn[2+]': 4,\n 'ZnO2[2-]': 4, 'ZnHO2[-]': 4}\n self.e_above_hull_test = {'ZnHO[+]': 0.0693, 'ZnO(aq)': 0.0624}\n self.decomp_test = {'ZnHO[+]': {'ZnO(s)': 0.5, 'Zn[2+]': 0.5},\n 'ZnO(aq)': {'ZnO(s)': 1.0}}\n self.pd = PourbaixDiagram(entries)\n self.plotter = PourbaixPlotter(self.pd)\n\n def test_plot_pourbaix(self):\n plt = self.plotter.get_pourbaix_plot(limits=[[-2, 14], [-3, 3]])\n\n def test_get_entry_stability(self):\n entry = self.pd.all_entries[0]\n plt = self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-\n 3, 3]])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\ntry:\n from pymatgen.analysis.pourbaix.plotter import PourbaixPlotter\n from pymatgen.analysis.pourbaix.analyzer import PourbaixAnalyzer\nexcept ImportError:\n PourbaixAnalyzer = None\n<assignment token>\n\n\[email protected](PourbaixAnalyzer is None,\n 'ImportError while importing PourbaixAnalyzer')\nclass TestPourbaixPlotter(unittest.TestCase):\n\n def setUp(self):\n module_dir = os.path.dirname(os.path.abspath(__file__))\n elements, entries = PourbaixEntryIO.from_csv(os.path.join(\n module_dir, 'test_entries.csv'))\n self.num_simplices = {'Zn(s)': 7, 'ZnO2(s)': 7, 'Zn[2+]': 4,\n 'ZnO2[2-]': 4, 'ZnHO2[-]': 4}\n self.e_above_hull_test = {'ZnHO[+]': 0.0693, 'ZnO(aq)': 0.0624}\n self.decomp_test = {'ZnHO[+]': {'ZnO(s)': 0.5, 'Zn[2+]': 0.5},\n 'ZnO(aq)': {'ZnO(s)': 1.0}}\n self.pd = PourbaixDiagram(entries)\n self.plotter = PourbaixPlotter(self.pd)\n\n def test_plot_pourbaix(self):\n plt = self.plotter.get_pourbaix_plot(limits=[[-2, 14], [-3, 3]])\n\n def test_get_entry_stability(self):\n entry = self.pd.all_entries[0]\n plt = self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-\n 3, 3]])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n<code token>\n<assignment token>\n\n\[email protected](PourbaixAnalyzer is None,\n 'ImportError while importing PourbaixAnalyzer')\nclass TestPourbaixPlotter(unittest.TestCase):\n\n def setUp(self):\n module_dir = os.path.dirname(os.path.abspath(__file__))\n elements, entries = PourbaixEntryIO.from_csv(os.path.join(\n module_dir, 'test_entries.csv'))\n self.num_simplices = {'Zn(s)': 7, 'ZnO2(s)': 7, 'Zn[2+]': 4,\n 'ZnO2[2-]': 4, 'ZnHO2[-]': 4}\n self.e_above_hull_test = {'ZnHO[+]': 0.0693, 'ZnO(aq)': 0.0624}\n self.decomp_test = {'ZnHO[+]': {'ZnO(s)': 0.5, 'Zn[2+]': 0.5},\n 'ZnO(aq)': {'ZnO(s)': 1.0}}\n self.pd = PourbaixDiagram(entries)\n self.plotter = PourbaixPlotter(self.pd)\n\n def test_plot_pourbaix(self):\n plt = self.plotter.get_pourbaix_plot(limits=[[-2, 14], [-3, 3]])\n\n def test_get_entry_stability(self):\n entry = self.pd.all_entries[0]\n plt = self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-\n 3, 3]])\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n\n\[email protected](PourbaixAnalyzer is None,\n 'ImportError while importing PourbaixAnalyzer')\nclass TestPourbaixPlotter(unittest.TestCase):\n\n def setUp(self):\n module_dir = os.path.dirname(os.path.abspath(__file__))\n elements, entries = PourbaixEntryIO.from_csv(os.path.join(\n module_dir, 'test_entries.csv'))\n self.num_simplices = {'Zn(s)': 7, 'ZnO2(s)': 7, 'Zn[2+]': 4,\n 'ZnO2[2-]': 4, 'ZnHO2[-]': 4}\n self.e_above_hull_test = {'ZnHO[+]': 0.0693, 'ZnO(aq)': 0.0624}\n self.decomp_test = {'ZnHO[+]': {'ZnO(s)': 0.5, 'Zn[2+]': 0.5},\n 'ZnO(aq)': {'ZnO(s)': 1.0}}\n self.pd = PourbaixDiagram(entries)\n self.plotter = PourbaixPlotter(self.pd)\n <function token>\n\n def test_get_entry_stability(self):\n entry = self.pd.all_entries[0]\n plt = self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-\n 3, 3]])\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n\n\[email protected](PourbaixAnalyzer is None,\n 'ImportError while importing PourbaixAnalyzer')\nclass TestPourbaixPlotter(unittest.TestCase):\n <function token>\n <function token>\n\n def test_get_entry_stability(self):\n entry = self.pd.all_entries[0]\n plt = self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-\n 3, 3]])\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n\n\[email protected](PourbaixAnalyzer is None,\n 'ImportError while importing PourbaixAnalyzer')\nclass TestPourbaixPlotter(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<class token>\n<code token>\n"
] | false |
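
The record above exercises pymatgen's PourbaixPlotter behind an @unittest.skipIf guard. A minimal, self-contained sketch of that optional-dependency test pattern (the optionallib name is hypothetical, not taken from the record):

import unittest

try:
    import optionallib  # hypothetical optional dependency
except ImportError:
    optionallib = None


@unittest.skipIf(optionallib is None,
                 'ImportError while importing optionallib')
class TestWithOptionalDep(unittest.TestCase):

    def test_dependency_available(self):
        # Runs only when the optional import above succeeded.
        self.assertIsNotNone(optionallib)


if __name__ == '__main__':
    unittest.main()
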
98,467 |
e76763911f3a2c82615af0659904c8d55890e5ed
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.starting_page, name="starting-page"), # for landing page
]
|
[
"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.starting_page, name=\"starting-page\"), # for landing page\n \n]",
"from django.urls import path\nfrom . import views\nurlpatterns = [path('', views.starting_page, name='starting-page')]\n",
"<import token>\nurlpatterns = [path('', views.starting_page, name='starting-page')]\n",
"<import token>\n<assignment token>\n"
] | false |
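
An app urlconf like the one above only takes effect once the project urlconf includes it. A sketch of that wiring, assuming the app package is named blog (a hypothetical label, not given in the record):

# Project-level urls.py -- hypothetical wiring for the app urlconf above.
from django.contrib import admin
from django.urls import include, path

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('blog.urls')),  # hands '' to the app's starting-page view
]
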
98,468 |
8e1e5a96ebdf380078d9967a707bcdd082c5cfc9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
beautifulsoup4>=4.5.1
click>=6.6
Flask>=0.11.1
Flask-Script>=2.0.5
Flask-SQLAlchemy>=2.1
html5lib>=0.999999999
itsdangerous>=0.24
Jinja2>=2.8
MarkupSafe>=0.23
numpy>=1.11.1
pandas>=0.18.1
pymongo>=3.3.0
python-dateutil>=2.5.3
pytz>=2016.6.1
six>=1.10.0
SQLAlchemy>=1.2.14
webencodings>=0.5
Werkzeug>=0.11.11
celery>=4.2.1
requests>=2.20.1
flask_principal>=0.4.0
flask_login>=0.4.1
flask_bcrypt>=0.7.1
flask_cache>=0.13.1
cookiejar>=0.0.2
pymysql>=0.9.2
gevent>=1.4.0
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nbeautifulsoup4>=4.5.1\nclick>=6.6\nFlask>=0.11.1\nFlask-Script>=2.0.5\nFlask-SQLAlchemy>=2.1\nhtml5lib>=0.999999999\nitsdangerous>=0.24\nJinja2>=2.8\nMarkupSafe>=0.23\nnumpy>=1.11.1\npandas>=0.18.1\npymongo>=3.3.0\npython-dateutil>=2.5.3\npytz>=2016.6.1\nsix>=1.10.0\nSQLAlchemy>=1.2.14\nwebencodings>=0.5\nWerkzeug>=0.11.11\n\ncelery>=4.2.1\nrequests>=2.20.1\nflask_principal>=0.4.0\nflask_login>=0.4.1 \nflask_bcrypt>=0.7.1\nflask_cache>=0.13.1\ncookiejar>=0.0.2\npymysql>=0.9.2\n\ngevent>=1.4.0 "
] | true |
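
The record above is a pip requirements list; the stray Python shebang is why it fails to parse as Python (error: true). A small runtime check of a few of those minimums using only the standard library; the package selection is arbitrary and the script is a sketch:

# Report installed versions for a few pinned packages (Python 3.8+).
from importlib.metadata import PackageNotFoundError, version

MINIMUMS = {'Flask': '0.11.1', 'SQLAlchemy': '1.2.14', 'requests': '2.20.1'}

for name, minimum in MINIMUMS.items():
    try:
        print('%s %s installed (requirement: >=%s)'
              % (name, version(name), minimum))
    except PackageNotFoundError:
        print('%s is not installed' % name)
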
98,469 |
eb9b09f970c7800d59148b97bb7dfea3493757ad
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, HttpResponse
from models import Books
def index(request):
book1 = Books.objects.create(title = 'Star Wars A New Hope', author = 'John Guy', published_date = 'January 21, 1977', category = 'science fiction', in_print = True)
book2 = Books.objects.create(title = 'How to Win at Life', author = 'Bart Winnington', published_date = 'May 6, 1987', category = 'self help', in_print = False)
book3 = Books.objects.create(title = 'Lost in Space', author = 'Bill Guymore', published_date = 'October 22, 1923', category = 'fiction', in_print = False)
book4 = Books.objects.create(title = 'My Life', author = 'Bill Clinton', published_date = 'June 8, 1999', category = 'memoir', in_print = True)
book5 = Books.objects.create(title = '1984', author = 'George Orwell', published_date = 'July 12, 1946', category = 'dystopian', in_print = True)
book6 = Books.objects.create(title = 'Winning at all costs', author = 'Barty Brownstone', published_date = 'August 19, 2007', category = 'non-fiction', in_print = True)
book7 = Books.objects.create(title = 'The Art of Winning', author = 'Guy Winski', published_date = 'October 6, 2012', category = 'non-fiction', in_print = True)
allBooks = Books.objects.all()
for book in allBooks:
print 'ID:', book.id, '\nTitle:', book.title, '\nAuthor:', book.author, '\nDate Published:', book.published_date, '\nCategory:', book.category, '\nCirculation:', book.in_print
return render(request, 'booksApp/index.html')
|
[
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom models import Books\n\ndef index(request):\n\tbook1 = Books.objects.create(title = 'Star Wars A New Hope', author = 'John Guy', published_date = 'January 21, 1977', category = 'science fiction', in_print = True)\n\tbook2 = Books.objects.create(title = 'How to Win at Life', author = 'Bart Winnington', published_date = 'May 6, 1987', category = 'self help', in_print = False)\n\tbook3 = Books.objects.create(title = 'Lost in Space', author = 'Bill Guymore', published_date = 'October 22, 1923', category = 'fiction', in_print = False)\n\tbook4 = Books.objects.create(title = 'My Life', author = 'Bill Clinton', published_date = 'June 8, 1999', category = 'memoir', in_print = True)\n\tbook5 = Books.objects.create(title = '1984', author = 'George Orwell', published_date = 'July 12, 1946', category = 'dystopian', in_print = True)\n\tbook6 = Books.objects.create(title = 'Winning at all costs', author = 'Barty Brownstone', published_date = 'August 19, 2007', category = 'non-fiction', in_print = True)\n\tbook7 = Books.objects.create(title = 'The Art of Winning', author = 'Guy Winski', published_date = 'October 6, 2012', category = 'non-fiction', in_print = True)\n\tallBooks = Books.objects.all()\n\tfor book in allBooks:\n\t\tprint 'ID:', book.id, '\\nTitle:', book.title, '\\nAuthor:', book.author, '\\nDate Published:', book.published_date, '\\nCategory:', book.category, '\\nCirculation:', book.in_print\n\treturn render(request, 'booksApp/index.html')\n\t\n\t"
] | true |
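
The view above is Python 2 (print statements, implicit relative import), which is why the record carries error: true. A hedged Python 3 rework of the same idea, assuming the same Books model; get_or_create avoids re-inserting duplicate rows on every request:

# Python 3 sketch of the view above, not the original author's code.
from django.shortcuts import render
from .models import Books

def index(request):
    Books.objects.get_or_create(title='1984', author='George Orwell',
                                published_date='July 12, 1946',
                                category='dystopian', in_print=True)
    for book in Books.objects.all():
        print('ID:', book.id, '| Title:', book.title, '| Author:', book.author)
    return render(request, 'booksApp/index.html')
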
98,470 |
433d7a7b2408def04b82579b060188383510b2ae
|
sentence = str(input('Write a sentence: ')).replace(' ', '').upper().strip()
newSentence = sentence[::-1]
# for i in range(len(sentence) -1, -1, -1):
# newSentence += sentence[i]
print('The inverse of {} is {}'.format(sentence, newSentence))
if (newSentence == sentence):
print('Your sentence is a palindrome')
else:
print('Your sentence is not a palindrome')
|
[
"sentence = str(input('Write a sentence: ')).replace(' ', '').upper().strip()\nnewSentence = sentence[::-1]\n# for i in range(len(sentence) -1, -1, -1):\n# newSentence += sentence[i]\n\nprint('The inverse of {} is {}'.format(sentence, newSentence))\nif (newSentence == sentence):\n print('Your sentence is a palindrome')\nelse:\n print('Your sentence is not a palindrome')",
"sentence = str(input('Write a sentence: ')).replace(' ', '').upper().strip()\nnewSentence = sentence[::-1]\nprint('The inverse of {} is {}'.format(sentence, newSentence))\nif newSentence == sentence:\n print('Your sentence is a palindrome')\nelse:\n print('Your sentence is not a palindrome')\n",
"<assignment token>\nprint('The inverse of {} is {}'.format(sentence, newSentence))\nif newSentence == sentence:\n print('Your sentence is a palindrome')\nelse:\n print('Your sentence is not a palindrome')\n",
"<assignment token>\n<code token>\n"
] | false |
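
The check above only strips spaces, so punctuation defeats it. A sketch that normalizes to alphanumeric characters before comparing:

# Keep only alphanumeric characters, then compare with the reversal.
def is_palindrome(text):
    cleaned = ''.join(ch for ch in text if ch.isalnum()).upper()
    return cleaned == cleaned[::-1]

print(is_palindrome('A man, a plan, a canal: Panama!'))  # True
print(is_palindrome('Not a palindrome'))                 # False
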
98,471 |
7771eb52085535ae117ab357e683360e5e33edaf
|
import os
import tempfile
from yggdrasil import backwards, platform
from yggdrasil.communication import CommBase
from yggdrasil.schema import register_component, inherit_schema
from yggdrasil.serialize.DirectSerialize import DirectSerialize
@register_component
class FileComm(CommBase.CommBase):
r"""Class for handling I/O from/to a file on disk.
>>> x = FileComm('test_send', address='test_file.txt', direction='send')
>>> x.send('Test message')
True
>>> with open('test_file.txt', 'r') as fd:
... print(fd.read())
Test message
>>> x = FileComm('test_recv', address='test_file.txt', direction='recv')
>>> x.recv()
(True, b'Test message')
Args:
name (str): The environment variable where communication address is
stored.
read_meth (str, optional): Method that should be used to read data
from the file. Defaults to 'read'. Ignored if direction is 'send'.
        append (bool, optional): If True and writing, file is opened in append
mode. Defaults to False.
in_temp (bool, optional): If True, the path will be considered relative
to the platform temporary directory. Defaults to False.
open_as_binary (bool, optional): If True, the file is opened in binary
mode. Defaults to True.
newline (str, optional): String indicating a new line. Defaults to
serialize._default_newline.
is_series (bool, optional): If True, input/output will be done to
a series of files. If reading, each file will be processed until
the end is reached. If writing, each output will be to a new
            file in the series. The address is assumed to contain a format
for the index of the file. Defaults to False.
wait_for_creation (float, optional): Time (in seconds) that should be
            waited for the file to be created before opening if it doesn't
            exist. Defaults to 0 s, in which case the file is opened immediately.
**kwargs: Additional keywords arguments are passed to parent class.
Attributes:
fd (file): File that should be read/written.
read_meth (str): Method that should be used to read data from the file.
        append (bool): If True and writing, file is opened in append mode.
in_temp (bool): If True, the path will be considered relative to the
platform temporary directory.
open_as_binary (bool): If True, the file is opened in binary mode.
newline (str): String indicating a new line.
is_series (bool): If True, input/output will be done to a series of
files. If reading, each file will be processed until the end is
reached. If writing, each output will be to a new file in the series.
platform_newline (str): String indicating a newline on the current
platform.
Raises:
ValueError: If the read_meth is not one of the supported values.
"""
_filetype = 'binary'
_datatype = {'type': 'bytes'}
_schema_type = 'file'
_schema_required = ['name', 'filetype', 'working_dir']
_schema_properties = inherit_schema(
CommBase.CommBase._schema_properties,
{'working_dir': {'type': 'string'},
'filetype': {'type': 'string', 'default': _filetype},
'append': {'type': 'boolean', 'default': False},
'in_temp': {'type': 'boolean', 'default': False},
'is_series': {'type': 'boolean', 'default': False},
'wait_for_creation': {'type': 'float', 'default': 0.0}},
remove_keys=['commtype', 'datatype'], **DirectSerialize._schema_properties)
_default_serializer = DirectSerialize
_attr_conv = ['newline', 'platform_newline']
_default_extension = '.txt'
is_file = True
_maxMsgSize = 0
def __init__(self, *args, **kwargs):
kwargs.setdefault('close_on_eof_send', True)
return super(FileComm, self).__init__(*args, **kwargs)
def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs):
r"""Get absolute path and set attributes."""
super(FileComm, self)._init_before_open(**kwargs)
# Process file class keywords
if not hasattr(self, '_fd'):
self._fd = None
if read_meth not in ['read', 'readline']:
raise ValueError("read_meth '%s' not supported." % read_meth)
self.read_meth = read_meth
self.platform_newline = platform._newline
if self.in_temp:
self.address = os.path.join(tempfile.gettempdir(), self.address)
self.address = os.path.abspath(self.address)
self.open_as_binary = open_as_binary
self._series_index = 0
# Put string attributes in the correct format
if self.open_as_binary:
func_conv = backwards.as_bytes
else:
func_conv = backwards.as_unicode
for k in self._attr_conv:
v = getattr(self, k)
if v is not None:
setattr(self, k, func_conv(v))
@classmethod
def get_testing_options(cls, read_meth='read', open_as_binary=True, **kwargs):
r"""Method to return a dictionary of testing options for this class.
Returns:
dict: Dictionary of variables to use for testing. Key/value pairs:
kwargs (dict): Keyword arguments for comms tested with the
provided content.
send (list): List of objects to send to test file.
recv (list): List of objects that will be received from a test
file that was sent the messages in 'send'.
contents (bytes): Bytes contents of test file created by sending
the messages in 'send'.
"""
out = super(FileComm, cls).get_testing_options(**kwargs)
out['kwargs']['read_meth'] = read_meth
out['kwargs']['open_as_binary'] = open_as_binary
if (read_meth == 'read') and isinstance(out['recv'][0], backwards.bytes_type):
out['recv'] = [b''.join(out['recv'])]
if not open_as_binary:
out['contents'] = out['contents'].replace(
backwards.match_stype(out['contents'], '\n'),
backwards.match_stype(out['contents'], platform._newline))
return out
@classmethod
def is_installed(cls, language=None):
r"""Determine if the necessary libraries are installed for this
communication class.
Args:
language (str, optional): Specific language that should be checked
for compatibility. Defaults to None and all languages supported
on the current platform will be checked.
Returns:
bool: Is the comm installed.
"""
# Filesystem is implied
return True
@classmethod
def underlying_comm_class(self):
r"""str: Name of underlying communication class."""
return 'FileComm'
@classmethod
def close_registry_entry(cls, value):
r"""Close a registry entry."""
out = False
if not value.closed: # pragma: debug
value.close()
out = True
return out
@classmethod
def new_comm_kwargs(cls, *args, **kwargs):
r"""Initialize communication with new queue."""
kwargs.setdefault('address', 'file.txt')
return args, kwargs
@property
def open_mode(self):
r"""str: Mode that should be used to open the file."""
if self.direction == 'recv':
io_mode = 'r'
elif self.append == 'ow':
io_mode = 'r+'
elif self.append:
io_mode = 'a'
else:
io_mode = 'w'
if self.open_as_binary:
io_mode += 'b'
return io_mode
def opp_comm_kwargs(self):
r"""Get keyword arguments to initialize communication with opposite
comm object.
Returns:
dict: Keyword arguments for opposite comm object.
"""
kwargs = super(FileComm, self).opp_comm_kwargs()
kwargs['newline'] = self.newline
kwargs['open_as_binary'] = self.open_as_binary
kwargs['is_series'] = self.is_series
return kwargs
@property
def registry_key(self):
r"""str: String used to register the socket."""
# return self.address
return '%s_%s_%s' % (self.address, self.direction, self.uuid)
def record_position(self):
r"""Record the current position in the file/series."""
_rec_pos = self.fd.tell()
_rec_ind = self._series_index
return _rec_pos, _rec_ind
def change_position(self, file_pos, series_index=None):
r"""Change the position in the file/series.
Args:
file_pos (int): Position that should be moved to in the file.
            series_index (int, optional): Index of the file in the series that
should be moved to. Defaults to None and will be set to the
current series index.
"""
if series_index is None:
series_index = self._series_index
self.advance_in_series(series_index)
self.advance_in_file(file_pos)
def advance_in_file(self, file_pos):
r"""Advance to a certain position in the current file.
Args:
            file_pos (int): Position that should be moved to in the current
                file.
"""
if self.is_open:
try:
self.fd.seek(file_pos)
except (AttributeError, ValueError): # pragma: debug
if self.is_open:
raise
def advance_in_series(self, series_index=None):
r"""Advance to a certain file in a series.
Args:
series_index (int, optional): Index of file in the series that
should be moved to. Defaults to None and call will advance to
the next file in the series.
Returns:
bool: True if the file was advanced in the series, False otherwise.
"""
out = False
if self.is_series:
if series_index is None:
series_index = self._series_index + 1
if self._series_index != series_index:
if (((self.direction == 'send')
or os.path.isfile(self.get_series_address(series_index)))):
self._file_close()
self._series_index = series_index
self._open()
out = True
self.debug("Advanced to %d", series_index)
return out
def get_series_address(self, index=None):
r"""Get the address of a file in the series.
Args:
index (int, optional): Index in series to get address for.
Defaults to None and the current index is used.
Returns:
str: Address for the file in the series.
"""
if index is None:
index = self._series_index
return self.address % index
@property
def current_address(self):
r"""str: Address of file currently being used."""
if self.is_series:
address = self.get_series_address()
else:
address = self.address
return address
def _open(self):
address = self.current_address
if self.fd is None:
if (not os.path.isfile(address)) and (self.wait_for_creation > 0):
T = self.start_timeout(self.wait_for_creation)
while (not T.is_out) and (not os.path.isfile(address)):
self.sleep()
self.stop_timeout()
self._fd = open(address, self.open_mode)
T = self.start_timeout()
while (not T.is_out) and (not self.is_open): # pragma: debug
self.sleep()
self.stop_timeout()
if self.append == 'ow':
try:
self.fd.seek(0, os.SEEK_END)
except (AttributeError, ValueError): # pragma: debug
if self.is_open:
raise
def _file_close(self):
if self.is_open:
try:
self.fd.flush()
os.fsync(self.fd.fileno())
except OSError: # pragma: debug
pass
try:
self.fd.close()
except (AttributeError, ValueError): # pragma: debug
if self.is_open:
raise
self._fd = None
def open(self):
r"""Open the file."""
super(FileComm, self).open()
self._open()
self.register_comm(self.registry_key, self.fd)
def _close(self, *args, **kwargs):
r"""Close the file."""
self._file_close()
self.unregister_comm(self.registry_key)
super(FileComm, self)._close(*args, **kwargs)
def remove_file(self):
r"""Remove the file."""
        assert self.is_closed
if self.is_series:
i = 0
while True:
address = self.get_series_address(i)
if not os.path.isfile(address):
break
os.remove(address)
i += 1
else:
if os.path.isfile(self.address):
os.remove(self.address)
@property
def is_open(self):
r"""bool: True if the connection is open."""
try:
return (self.fd is not None) and (not self.fd.closed)
except AttributeError: # pragma: debug
if self.fd is not None:
raise
return False
@property
def fd(self):
r"""Associated file identifier."""
return self._fd
@property
def remaining_bytes(self):
r"""int: Remaining bytes in the file."""
if self.is_closed or self.direction == 'send':
return 0
pos = self.record_position()
try:
curpos = self.fd.tell()
self.fd.seek(0, os.SEEK_END)
endpos = self.fd.tell()
out = endpos - curpos
except (ValueError, AttributeError): # pragma: debug
if self.is_open:
raise
out = 0
if self.is_series:
i = self._series_index + 1
while True:
fname = self.get_series_address(i)
if not os.path.isfile(fname):
break
out += os.path.getsize(fname)
i += 1
self.change_position(*pos)
return out
@property
def n_msg_recv(self):
r"""int: The number of messages in the file."""
if self.is_closed:
return 0
if self.read_meth == 'read':
return int(self.remaining_bytes > 0)
elif self.read_meth == 'readline':
pos = self.record_position()
try:
out = 0
flag, msg = self._recv()
while len(msg) != 0 and msg != self.eof_msg:
out += 1
flag, msg = self._recv()
except ValueError: # pragma: debug
out = 0
self.change_position(*pos)
else: # pragma: debug
self.error('Unsupported read_meth: %s', self.read_meth)
out = 0
return out
def on_send_eof(self):
r"""Close file when EOF to be sent.
Returns:
bool: False so that message not sent.
"""
flag, msg_s = super(FileComm, self).on_send_eof()
try:
self.fd.flush()
except (AttributeError, ValueError): # pragma: debug
if self.is_open:
raise
# self.close()
return flag, msg_s
def _send(self, msg):
r"""Write message to a file.
Args:
msg (bytes, str): Data to write to the file.
Returns:
bool: Success or failure of writing to the file.
"""
try:
if msg != self.eof_msg:
if not self.open_as_binary:
msg = backwards.as_unicode(msg)
self.fd.write(msg)
if self.append == 'ow':
self.fd.truncate()
self.fd.flush()
except (AttributeError, ValueError): # pragma: debug
if self.is_open:
raise
return False
if msg != self.eof_msg and self.is_series:
self.advance_in_series()
self.debug("Advanced to %d", self._series_index)
return True
def _recv(self, timeout=0):
r"""Reads message from a file.
Args:
timeout (float, optional): Time in seconds to wait for a message.
Defaults to self.recv_timeout. Unused.
Returns:
            tuple (bool, bytes): Success or failure of reading from the file
                and the read message as bytes.
"""
flag = True
try:
if self.read_meth == 'read':
out = self.fd.read()
elif self.read_meth == 'readline':
out = self.fd.readline()
except BaseException: # pragma: debug
# Use this to catch case where close called during receive.
# In the future this should be handled via a lock.
out = ''
if len(out) == 0:
if self.advance_in_series():
self.debug("Advanced to %d", self._series_index)
flag, out = self._recv()
else:
out = self.eof_msg
else:
out = out.replace(self.platform_newline, self.newline)
if not self.open_as_binary:
out = backwards.as_bytes(out)
return (flag, out)
def purge(self):
r"""Purge all messages from the comm."""
if self.is_open and self.direction == 'recv':
try:
self.fd.seek(0, os.SEEK_END)
except (AttributeError, ValueError): # pragma: debug
if self.is_open:
raise
|
[
"import os\nimport tempfile\nfrom yggdrasil import backwards, platform\nfrom yggdrasil.communication import CommBase\nfrom yggdrasil.schema import register_component, inherit_schema\nfrom yggdrasil.serialize.DirectSerialize import DirectSerialize\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n r\"\"\"Class for handling I/O from/to a file on disk.\n\n >>> x = FileComm('test_send', address='test_file.txt', direction='send')\n >>> x.send('Test message')\n True\n >>> with open('test_file.txt', 'r') as fd:\n ... print(fd.read())\n Test message\n >>> x = FileComm('test_recv', address='test_file.txt', direction='recv')\n >>> x.recv()\n (True, b'Test message')\n\n Args:\n name (str): The environment variable where communication address is\n stored.\n read_meth (str, optional): Method that should be used to read data\n from the file. Defaults to 'read'. Ignored if direction is 'send'.\n append (bool, optional): If True and writing, file is openned in append\n mode. Defaults to False.\n in_temp (bool, optional): If True, the path will be considered relative\n to the platform temporary directory. Defaults to False.\n open_as_binary (bool, optional): If True, the file is opened in binary\n mode. Defaults to True.\n newline (str, optional): String indicating a new line. Defaults to\n serialize._default_newline.\n is_series (bool, optional): If True, input/output will be done to\n a series of files. If reading, each file will be processed until\n the end is reached. If writing, each output will be to a new\n file in the series. The addressed is assumed to contain a format\n for the index of the file. Defaults to False.\n wait_for_creation (float, optional): Time (in seconds) that should be\n waited before opening for the file to be created if it dosn't exist.\n Defaults to 0 s and file will attempt to be opened immediately.\n **kwargs: Additional keywords arguments are passed to parent class.\n\n Attributes:\n fd (file): File that should be read/written.\n read_meth (str): Method that should be used to read data from the file.\n append (bool): If True and writing, file is openned in append mode.\n in_temp (bool): If True, the path will be considered relative to the\n platform temporary directory.\n open_as_binary (bool): If True, the file is opened in binary mode.\n newline (str): String indicating a new line.\n is_series (bool): If True, input/output will be done to a series of\n files. If reading, each file will be processed until the end is\n reached. 
If writing, each output will be to a new file in the series.\n platform_newline (str): String indicating a newline on the current\n platform.\n\n Raises:\n ValueError: If the read_meth is not one of the supported values.\n\n \"\"\"\n\n _filetype = 'binary'\n _datatype = {'type': 'bytes'}\n _schema_type = 'file'\n _schema_required = ['name', 'filetype', 'working_dir']\n _schema_properties = inherit_schema(\n CommBase.CommBase._schema_properties,\n {'working_dir': {'type': 'string'},\n 'filetype': {'type': 'string', 'default': _filetype},\n 'append': {'type': 'boolean', 'default': False},\n 'in_temp': {'type': 'boolean', 'default': False},\n 'is_series': {'type': 'boolean', 'default': False},\n 'wait_for_creation': {'type': 'float', 'default': 0.0}},\n remove_keys=['commtype', 'datatype'], **DirectSerialize._schema_properties)\n _default_serializer = DirectSerialize\n _attr_conv = ['newline', 'platform_newline']\n _default_extension = '.txt'\n is_file = True\n _maxMsgSize = 0\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs):\n r\"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n # Process file class keywords\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n # Put string attributes in the correct format\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **kwargs):\n r\"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if (read_meth == 'read') and isinstance(out['recv'][0], backwards.bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(\n backwards.match_stype(out['contents'], '\\n'),\n backwards.match_stype(out['contents'], platform._newline))\n return out\n \n @classmethod\n def is_installed(cls, language=None):\n r\"\"\"Determine if the necessary libraries are installed for this\n communication class.\n\n Args:\n language (str, optional): Specific language that should be checked\n for compatibility. 
Defaults to None and all languages supported\n on the current platform will be checked.\n\n Returns:\n bool: Is the comm installed.\n\n \"\"\"\n # Filesystem is implied\n return True\n\n @classmethod\n def underlying_comm_class(self):\n r\"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n r\"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed: # pragma: debug\n value.close()\n out = True\n return out\n\n @classmethod\n def new_comm_kwargs(cls, *args, **kwargs):\n r\"\"\"Initialize communication with new queue.\"\"\"\n kwargs.setdefault('address', 'file.txt')\n return args, kwargs\n\n @property\n def open_mode(self):\n r\"\"\"str: Mode that should be used to open the file.\"\"\"\n if self.direction == 'recv':\n io_mode = 'r'\n elif self.append == 'ow':\n io_mode = 'r+'\n elif self.append:\n io_mode = 'a'\n else:\n io_mode = 'w'\n if self.open_as_binary:\n io_mode += 'b'\n return io_mode\n\n def opp_comm_kwargs(self):\n r\"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n\n @property\n def registry_key(self):\n r\"\"\"str: String used to register the socket.\"\"\"\n # return self.address\n return '%s_%s_%s' % (self.address, self.direction, self.uuid)\n\n def record_position(self):\n r\"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n r\"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n r\"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n r\"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. 
Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if (((self.direction == 'send')\n or os.path.isfile(self.get_series_address(series_index)))):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug(\"Advanced to %d\", series_index)\n return out\n\n def get_series_address(self, index=None):\n r\"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n r\"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n \n def _open(self):\n address = self.current_address\n if self.fd is None:\n if (not os.path.isfile(address)) and (self.wait_for_creation > 0):\n T = self.start_timeout(self.wait_for_creation)\n while (not T.is_out) and (not os.path.isfile(address)):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while (not T.is_out) and (not self.is_open): # pragma: debug\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise\n\n def _file_close(self):\n if self.is_open:\n try:\n self.fd.flush()\n os.fsync(self.fd.fileno())\n except OSError: # pragma: debug\n pass\n try:\n self.fd.close()\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise\n self._fd = None\n\n def open(self):\n r\"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n r\"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n r\"\"\"Remove the file.\"\"\"\n assert(self.is_closed)\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n else:\n if os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n r\"\"\"bool: True if the connection is open.\"\"\"\n try:\n return (self.fd is not None) and (not self.fd.closed)\n except AttributeError: # pragma: debug\n if self.fd is not None:\n raise\n return False\n\n @property\n def fd(self):\n r\"\"\"Associated file identifier.\"\"\"\n return self._fd\n\n @property\n def remaining_bytes(self):\n r\"\"\"int: Remaining bytes in the file.\"\"\"\n if self.is_closed or self.direction == 'send':\n return 0\n pos = self.record_position()\n try:\n curpos = self.fd.tell()\n self.fd.seek(0, os.SEEK_END)\n endpos = self.fd.tell()\n out = endpos - curpos\n except (ValueError, AttributeError): # pragma: debug\n if self.is_open:\n raise\n out = 0\n if self.is_series:\n i = self._series_index + 1\n while True:\n fname = self.get_series_address(i)\n if not os.path.isfile(fname):\n break\n out += os.path.getsize(fname)\n i += 1\n 
self.change_position(*pos)\n return out\n\n @property\n def n_msg_recv(self):\n r\"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n return 0\n if self.read_meth == 'read':\n return int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError: # pragma: debug\n out = 0\n self.change_position(*pos)\n else: # pragma: debug\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n r\"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise\n # self.close()\n return flag, msg_s\n\n def _send(self, msg):\n r\"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug(\"Advanced to %d\", self._series_index)\n return True\n\n def _recv(self, timeout=0):\n r\"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException: # pragma: debug\n # Use this to catch case where close called during receive.\n # In the future this should be handled via a lock.\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug(\"Advanced to %d\", self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return (flag, out)\n\n def purge(self):\n r\"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise\n",
"import os\nimport tempfile\nfrom yggdrasil import backwards, platform\nfrom yggdrasil.communication import CommBase\nfrom yggdrasil.schema import register_component, inherit_schema\nfrom yggdrasil.serialize.DirectSerialize import DirectSerialize\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n \"\"\"Class for handling I/O from/to a file on disk.\n\n >>> x = FileComm('test_send', address='test_file.txt', direction='send')\n >>> x.send('Test message')\n True\n >>> with open('test_file.txt', 'r') as fd:\n ... print(fd.read())\n Test message\n >>> x = FileComm('test_recv', address='test_file.txt', direction='recv')\n >>> x.recv()\n (True, b'Test message')\n\n Args:\n name (str): The environment variable where communication address is\n stored.\n read_meth (str, optional): Method that should be used to read data\n from the file. Defaults to 'read'. Ignored if direction is 'send'.\n append (bool, optional): If True and writing, file is openned in append\n mode. Defaults to False.\n in_temp (bool, optional): If True, the path will be considered relative\n to the platform temporary directory. Defaults to False.\n open_as_binary (bool, optional): If True, the file is opened in binary\n mode. Defaults to True.\n newline (str, optional): String indicating a new line. Defaults to\n serialize._default_newline.\n is_series (bool, optional): If True, input/output will be done to\n a series of files. If reading, each file will be processed until\n the end is reached. If writing, each output will be to a new\n file in the series. The addressed is assumed to contain a format\n for the index of the file. Defaults to False.\n wait_for_creation (float, optional): Time (in seconds) that should be\n waited before opening for the file to be created if it dosn't exist.\n Defaults to 0 s and file will attempt to be opened immediately.\n **kwargs: Additional keywords arguments are passed to parent class.\n\n Attributes:\n fd (file): File that should be read/written.\n read_meth (str): Method that should be used to read data from the file.\n append (bool): If True and writing, file is openned in append mode.\n in_temp (bool): If True, the path will be considered relative to the\n platform temporary directory.\n open_as_binary (bool): If True, the file is opened in binary mode.\n newline (str): String indicating a new line.\n is_series (bool): If True, input/output will be done to a series of\n files. If reading, each file will be processed until the end is\n reached. 
If writing, each output will be to a new file in the series.\n platform_newline (str): String indicating a newline on the current\n platform.\n\n Raises:\n ValueError: If the read_meth is not one of the supported values.\n\n \"\"\"\n _filetype = 'binary'\n _datatype = {'type': 'bytes'}\n _schema_type = 'file'\n _schema_required = ['name', 'filetype', 'working_dir']\n _schema_properties = inherit_schema(CommBase.CommBase.\n _schema_properties, {'working_dir': {'type': 'string'}, 'filetype':\n {'type': 'string', 'default': _filetype}, 'append': {'type':\n 'boolean', 'default': False}, 'in_temp': {'type': 'boolean',\n 'default': False}, 'is_series': {'type': 'boolean', 'default': \n False}, 'wait_for_creation': {'type': 'float', 'default': 0.0}},\n remove_keys=['commtype', 'datatype'], **DirectSerialize.\n _schema_properties)\n _default_serializer = DirectSerialize\n _attr_conv = ['newline', 'platform_newline']\n _default_extension = '.txt'\n is_file = True\n _maxMsgSize = 0\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n\n @classmethod\n def is_installed(cls, language=None):\n \"\"\"Determine if the necessary libraries are installed for this\n communication class.\n\n Args:\n language (str, optional): Specific language that should be checked\n for compatibility. 
Defaults to None and all languages supported\n on the current platform will be checked.\n\n Returns:\n bool: Is the comm installed.\n\n \"\"\"\n return True\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n\n @classmethod\n def new_comm_kwargs(cls, *args, **kwargs):\n \"\"\"Initialize communication with new queue.\"\"\"\n kwargs.setdefault('address', 'file.txt')\n return args, kwargs\n\n @property\n def open_mode(self):\n \"\"\"str: Mode that should be used to open the file.\"\"\"\n if self.direction == 'recv':\n io_mode = 'r'\n elif self.append == 'ow':\n io_mode = 'r+'\n elif self.append:\n io_mode = 'a'\n else:\n io_mode = 'w'\n if self.open_as_binary:\n io_mode += 'b'\n return io_mode\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n\n @property\n def registry_key(self):\n \"\"\"str: String used to register the socket.\"\"\"\n return '%s_%s_%s' % (self.address, self.direction, self.uuid)\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. 
Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def _file_close(self):\n if self.is_open:\n try:\n self.fd.flush()\n os.fsync(self.fd.fileno())\n except OSError:\n pass\n try:\n self.fd.close()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n self._fd = None\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n\n @property\n def fd(self):\n \"\"\"Associated file identifier.\"\"\"\n return self._fd\n\n @property\n def remaining_bytes(self):\n \"\"\"int: Remaining bytes in the file.\"\"\"\n if self.is_closed or self.direction == 'send':\n return 0\n pos = self.record_position()\n try:\n curpos = self.fd.tell()\n self.fd.seek(0, os.SEEK_END)\n endpos = self.fd.tell()\n out = endpos - curpos\n except (ValueError, AttributeError):\n if self.is_open:\n raise\n out = 0\n if self.is_series:\n i = self._series_index + 1\n while True:\n fname = self.get_series_address(i)\n if not os.path.isfile(fname):\n break\n out += os.path.getsize(fname)\n i += 1\n self.change_position(*pos)\n return out\n\n @property\n def n_msg_recv(self):\n \"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n 
return 0\n if self.read_meth == 'read':\n return int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError:\n out = 0\n self.change_position(*pos)\n else:\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n \"\"\"Class for handling I/O from/to a file on disk.\n\n >>> x = FileComm('test_send', address='test_file.txt', direction='send')\n >>> x.send('Test message')\n True\n >>> with open('test_file.txt', 'r') as fd:\n ... print(fd.read())\n Test message\n >>> x = FileComm('test_recv', address='test_file.txt', direction='recv')\n >>> x.recv()\n (True, b'Test message')\n\n Args:\n name (str): The environment variable where communication address is\n stored.\n read_meth (str, optional): Method that should be used to read data\n from the file. Defaults to 'read'. Ignored if direction is 'send'.\n append (bool, optional): If True and writing, file is openned in append\n mode. Defaults to False.\n in_temp (bool, optional): If True, the path will be considered relative\n to the platform temporary directory. Defaults to False.\n open_as_binary (bool, optional): If True, the file is opened in binary\n mode. Defaults to True.\n newline (str, optional): String indicating a new line. Defaults to\n serialize._default_newline.\n is_series (bool, optional): If True, input/output will be done to\n a series of files. If reading, each file will be processed until\n the end is reached. If writing, each output will be to a new\n file in the series. The addressed is assumed to contain a format\n for the index of the file. Defaults to False.\n wait_for_creation (float, optional): Time (in seconds) that should be\n waited before opening for the file to be created if it dosn't exist.\n Defaults to 0 s and file will attempt to be opened immediately.\n **kwargs: Additional keywords arguments are passed to parent class.\n\n Attributes:\n fd (file): File that should be read/written.\n read_meth (str): Method that should be used to read data from the file.\n append (bool): If True and writing, file is openned in append mode.\n in_temp (bool): If True, the path will be considered relative to the\n platform temporary directory.\n open_as_binary (bool): If True, the file is opened in binary mode.\n newline (str): String indicating a new line.\n is_series (bool): If True, input/output will be done to a series of\n files. If reading, each file will be processed until the end is\n reached. 
If writing, each output will be to a new file in the series.\n platform_newline (str): String indicating a newline on the current\n platform.\n\n Raises:\n ValueError: If the read_meth is not one of the supported values.\n\n \"\"\"\n _filetype = 'binary'\n _datatype = {'type': 'bytes'}\n _schema_type = 'file'\n _schema_required = ['name', 'filetype', 'working_dir']\n _schema_properties = inherit_schema(CommBase.CommBase.\n _schema_properties, {'working_dir': {'type': 'string'}, 'filetype':\n {'type': 'string', 'default': _filetype}, 'append': {'type':\n 'boolean', 'default': False}, 'in_temp': {'type': 'boolean',\n 'default': False}, 'is_series': {'type': 'boolean', 'default': \n False}, 'wait_for_creation': {'type': 'float', 'default': 0.0}},\n remove_keys=['commtype', 'datatype'], **DirectSerialize.\n _schema_properties)\n _default_serializer = DirectSerialize\n _attr_conv = ['newline', 'platform_newline']\n _default_extension = '.txt'\n is_file = True\n _maxMsgSize = 0\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n\n @classmethod\n def is_installed(cls, language=None):\n \"\"\"Determine if the necessary libraries are installed for this\n communication class.\n\n Args:\n language (str, optional): Specific language that should be checked\n for compatibility. 
Defaults to None and all languages supported\n on the current platform will be checked.\n\n Returns:\n bool: Is the comm installed.\n\n \"\"\"\n return True\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n\n @classmethod\n def new_comm_kwargs(cls, *args, **kwargs):\n \"\"\"Initialize communication with new queue.\"\"\"\n kwargs.setdefault('address', 'file.txt')\n return args, kwargs\n\n @property\n def open_mode(self):\n \"\"\"str: Mode that should be used to open the file.\"\"\"\n if self.direction == 'recv':\n io_mode = 'r'\n elif self.append == 'ow':\n io_mode = 'r+'\n elif self.append:\n io_mode = 'a'\n else:\n io_mode = 'w'\n if self.open_as_binary:\n io_mode += 'b'\n return io_mode\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n\n @property\n def registry_key(self):\n \"\"\"str: String used to register the socket.\"\"\"\n return '%s_%s_%s' % (self.address, self.direction, self.uuid)\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. 
Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def _file_close(self):\n if self.is_open:\n try:\n self.fd.flush()\n os.fsync(self.fd.fileno())\n except OSError:\n pass\n try:\n self.fd.close()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n self._fd = None\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n\n @property\n def fd(self):\n \"\"\"Associated file identifier.\"\"\"\n return self._fd\n\n @property\n def remaining_bytes(self):\n \"\"\"int: Remaining bytes in the file.\"\"\"\n if self.is_closed or self.direction == 'send':\n return 0\n pos = self.record_position()\n try:\n curpos = self.fd.tell()\n self.fd.seek(0, os.SEEK_END)\n endpos = self.fd.tell()\n out = endpos - curpos\n except (ValueError, AttributeError):\n if self.is_open:\n raise\n out = 0\n if self.is_series:\n i = self._series_index + 1\n while True:\n fname = self.get_series_address(i)\n if not os.path.isfile(fname):\n break\n out += os.path.getsize(fname)\n i += 1\n self.change_position(*pos)\n return out\n\n @property\n def n_msg_recv(self):\n \"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n 
return 0\n if self.read_meth == 'read':\n return int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError:\n out = 0\n self.change_position(*pos)\n else:\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
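The `open_mode` property in the step above maps the comm's direction and `append` flag onto a Python `open()` mode string. A minimal standalone sketch of that mapping (the helper function and the test values below are hypothetical; only the flag semantics come from the dataset code):

```python
# Standalone sketch of FileComm.open_mode's flag-to-mode mapping.
def open_mode(direction, append=False, open_as_binary=True):
    """Map comm flags to a Python open() mode string."""
    if direction == 'recv':
        io_mode = 'r'        # readers never truncate the file
    elif append == 'ow':
        io_mode = 'r+'       # overwrite in place, keep existing bytes
    elif append:
        io_mode = 'a'        # append to the end
    else:
        io_mode = 'w'        # fresh file, truncate on open
    if open_as_binary:
        io_mode += 'b'
    return io_mode

assert open_mode('recv') == 'rb'
assert open_mode('send', append=True, open_as_binary=False) == 'a'
assert open_mode('send', append='ow') == 'r+b'
```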
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n _filetype = 'binary'\n _datatype = {'type': 'bytes'}\n _schema_type = 'file'\n _schema_required = ['name', 'filetype', 'working_dir']\n _schema_properties = inherit_schema(CommBase.CommBase.\n _schema_properties, {'working_dir': {'type': 'string'}, 'filetype':\n {'type': 'string', 'default': _filetype}, 'append': {'type':\n 'boolean', 'default': False}, 'in_temp': {'type': 'boolean',\n 'default': False}, 'is_series': {'type': 'boolean', 'default': \n False}, 'wait_for_creation': {'type': 'float', 'default': 0.0}},\n remove_keys=['commtype', 'datatype'], **DirectSerialize.\n _schema_properties)\n _default_serializer = DirectSerialize\n _attr_conv = ['newline', 'platform_newline']\n _default_extension = '.txt'\n is_file = True\n _maxMsgSize = 0\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n\n @classmethod\n def is_installed(cls, language=None):\n \"\"\"Determine if the necessary libraries are installed for this\n communication class.\n\n Args:\n language (str, optional): Specific language that should be checked\n for compatibility. 
Defaults to None and all languages supported\n on the current platform will be checked.\n\n Returns:\n bool: Is the comm installed.\n\n \"\"\"\n return True\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n\n @classmethod\n def new_comm_kwargs(cls, *args, **kwargs):\n \"\"\"Initialize communication with new queue.\"\"\"\n kwargs.setdefault('address', 'file.txt')\n return args, kwargs\n\n @property\n def open_mode(self):\n \"\"\"str: Mode that should be used to open the file.\"\"\"\n if self.direction == 'recv':\n io_mode = 'r'\n elif self.append == 'ow':\n io_mode = 'r+'\n elif self.append:\n io_mode = 'a'\n else:\n io_mode = 'w'\n if self.open_as_binary:\n io_mode += 'b'\n return io_mode\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n\n @property\n def registry_key(self):\n \"\"\"str: String used to register the socket.\"\"\"\n return '%s_%s_%s' % (self.address, self.direction, self.uuid)\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. 
Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def _file_close(self):\n if self.is_open:\n try:\n self.fd.flush()\n os.fsync(self.fd.fileno())\n except OSError:\n pass\n try:\n self.fd.close()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n self._fd = None\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n\n @property\n def fd(self):\n \"\"\"Associated file identifier.\"\"\"\n return self._fd\n\n @property\n def remaining_bytes(self):\n \"\"\"int: Remaining bytes in the file.\"\"\"\n if self.is_closed or self.direction == 'send':\n return 0\n pos = self.record_position()\n try:\n curpos = self.fd.tell()\n self.fd.seek(0, os.SEEK_END)\n endpos = self.fd.tell()\n out = endpos - curpos\n except (ValueError, AttributeError):\n if self.is_open:\n raise\n out = 0\n if self.is_series:\n i = self._series_index + 1\n while True:\n fname = self.get_series_address(i)\n if not os.path.isfile(fname):\n break\n out += os.path.getsize(fname)\n i += 1\n self.change_position(*pos)\n return out\n\n @property\n def n_msg_recv(self):\n \"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n 
return 0\n if self.read_meth == 'read':\n return int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError:\n out = 0\n self.change_position(*pos)\n else:\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
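When `is_series` is set, `get_series_address` fills a printf-style index into the comm address, and `advance_in_series` walks the open file descriptor along that sequence (a sender advances after each message; a receiver only once the next file exists). A standalone sketch of the addressing scheme, with a hypothetical `output_%d.txt` template:

```python
import os
import tempfile

# Hypothetical sketch of the series addressing in the step above: the comm
# address is a printf-style template and the series index fills it in.
template = os.path.join(tempfile.gettempdir(), 'output_%d.txt')

def get_series_address(template, index):
    return template % index

for index in range(3):                      # one file per message sent
    with open(get_series_address(template, index), 'wb') as fd:
        fd.write(b'message %d\n' % index)

assert get_series_address('output_%d.txt', 2) == 'output_2.txt'

for index in range(3):                      # clean up the sketch files
    os.remove(get_series_address(template, index))
```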
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n\n @classmethod\n def is_installed(cls, language=None):\n \"\"\"Determine if the necessary libraries are installed for this\n communication class.\n\n Args:\n language (str, optional): Specific language that should be checked\n for compatibility. 
Defaults to None and all languages supported\n on the current platform will be checked.\n\n Returns:\n bool: Is the comm installed.\n\n \"\"\"\n return True\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n\n @classmethod\n def new_comm_kwargs(cls, *args, **kwargs):\n \"\"\"Initialize communication with new queue.\"\"\"\n kwargs.setdefault('address', 'file.txt')\n return args, kwargs\n\n @property\n def open_mode(self):\n \"\"\"str: Mode that should be used to open the file.\"\"\"\n if self.direction == 'recv':\n io_mode = 'r'\n elif self.append == 'ow':\n io_mode = 'r+'\n elif self.append:\n io_mode = 'a'\n else:\n io_mode = 'w'\n if self.open_as_binary:\n io_mode += 'b'\n return io_mode\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n\n @property\n def registry_key(self):\n \"\"\"str: String used to register the socket.\"\"\"\n return '%s_%s_%s' % (self.address, self.direction, self.uuid)\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. 
Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def _file_close(self):\n if self.is_open:\n try:\n self.fd.flush()\n os.fsync(self.fd.fileno())\n except OSError:\n pass\n try:\n self.fd.close()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n self._fd = None\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n\n @property\n def fd(self):\n \"\"\"Associated file identifier.\"\"\"\n return self._fd\n\n @property\n def remaining_bytes(self):\n \"\"\"int: Remaining bytes in the file.\"\"\"\n if self.is_closed or self.direction == 'send':\n return 0\n pos = self.record_position()\n try:\n curpos = self.fd.tell()\n self.fd.seek(0, os.SEEK_END)\n endpos = self.fd.tell()\n out = endpos - curpos\n except (ValueError, AttributeError):\n if self.is_open:\n raise\n out = 0\n if self.is_series:\n i = self._series_index + 1\n while True:\n fname = self.get_series_address(i)\n if not os.path.isfile(fname):\n break\n out += os.path.getsize(fname)\n i += 1\n self.change_position(*pos)\n return out\n\n @property\n def n_msg_recv(self):\n \"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n 
return 0\n if self.read_meth == 'read':\n return int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError:\n out = 0\n self.change_position(*pos)\n else:\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
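`remaining_bytes` works out how much of the current file is left to read by seeking to the end and subtracting the current offset, then restoring the position. A self-contained sketch of that tell/seek arithmetic (the wrapper name is an assumption; the restore step is a plain-`seek` analogue of `record_position`/`change_position`):

```python
import os
import tempfile

def remaining_bytes(fd):
    curpos = fd.tell()            # where the reader currently is
    fd.seek(0, os.SEEK_END)
    endpos = fd.tell()            # total size of the file
    fd.seek(curpos)               # restore the original position
    return endpos - curpos

with tempfile.TemporaryFile() as fd:
    fd.write(b'0123456789')
    fd.seek(4)
    assert remaining_bytes(fd) == 6   # 10 bytes total, 4 already consumed
    assert fd.tell() == 4             # the scan left the cursor untouched
```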
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n\n @classmethod\n def is_installed(cls, language=None):\n \"\"\"Determine if the necessary libraries are installed for this\n communication class.\n\n Args:\n language (str, optional): Specific language that should be checked\n for compatibility. 
Defaults to None and all languages supported\n on the current platform will be checked.\n\n Returns:\n bool: Is the comm installed.\n\n \"\"\"\n return True\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n\n @classmethod\n def new_comm_kwargs(cls, *args, **kwargs):\n \"\"\"Initialize communication with new queue.\"\"\"\n kwargs.setdefault('address', 'file.txt')\n return args, kwargs\n <function token>\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n\n @property\n def registry_key(self):\n \"\"\"str: String used to register the socket.\"\"\"\n return '%s_%s_%s' % (self.address, self.direction, self.uuid)\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. 
Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def _file_close(self):\n if self.is_open:\n try:\n self.fd.flush()\n os.fsync(self.fd.fileno())\n except OSError:\n pass\n try:\n self.fd.close()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n self._fd = None\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n\n @property\n def fd(self):\n \"\"\"Associated file identifier.\"\"\"\n return self._fd\n\n @property\n def remaining_bytes(self):\n \"\"\"int: Remaining bytes in the file.\"\"\"\n if self.is_closed or self.direction == 'send':\n return 0\n pos = self.record_position()\n try:\n curpos = self.fd.tell()\n self.fd.seek(0, os.SEEK_END)\n endpos = self.fd.tell()\n out = endpos - curpos\n except (ValueError, AttributeError):\n if self.is_open:\n raise\n out = 0\n if self.is_series:\n i = self._series_index + 1\n while True:\n fname = self.get_series_address(i)\n if not os.path.isfile(fname):\n break\n out += os.path.getsize(fname)\n i += 1\n self.change_position(*pos)\n return out\n\n @property\n def n_msg_recv(self):\n \"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n 
return 0\n if self.read_meth == 'read':\n return int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError:\n out = 0\n self.change_position(*pos)\n else:\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
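`_recv` rewrites the platform's newline bytes into the comm's canonical newline before handing a message back. A small sketch of that normalization; the two constants below are assumptions standing in for `platform._newline` and the comm's `newline` attribute:

```python
# Sketch of the newline normalization performed in _recv() above: bytes read
# from disk carry the platform newline (b'\r\n' on Windows) and are rewritten
# to the comm's canonical newline before the message is returned.
platform_newline = b'\r\n'   # what the OS wrote
comm_newline = b'\n'         # what the comm protocol expects

raw = b'line one\r\nline two\r\n'
msg = raw.replace(platform_newline, comm_newline)
assert msg == b'line one\nline two\n'
```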
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n\n @classmethod\n def is_installed(cls, language=None):\n \"\"\"Determine if the necessary libraries are installed for this\n communication class.\n\n Args:\n language (str, optional): Specific language that should be checked\n for compatibility. 
Defaults to None and all languages supported\n on the current platform will be checked.\n\n Returns:\n bool: Is the comm installed.\n\n \"\"\"\n return True\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n\n @classmethod\n def new_comm_kwargs(cls, *args, **kwargs):\n \"\"\"Initialize communication with new queue.\"\"\"\n kwargs.setdefault('address', 'file.txt')\n return args, kwargs\n <function token>\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n\n @property\n def registry_key(self):\n \"\"\"str: String used to register the socket.\"\"\"\n return '%s_%s_%s' % (self.address, self.direction, self.uuid)\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. 
Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n\n @property\n def fd(self):\n \"\"\"Associated file identifier.\"\"\"\n return self._fd\n\n @property\n def remaining_bytes(self):\n \"\"\"int: Remaining bytes in the file.\"\"\"\n if self.is_closed or self.direction == 'send':\n return 0\n pos = self.record_position()\n try:\n curpos = self.fd.tell()\n self.fd.seek(0, os.SEEK_END)\n endpos = self.fd.tell()\n out = endpos - curpos\n except (ValueError, AttributeError):\n if self.is_open:\n raise\n out = 0\n if self.is_series:\n i = self._series_index + 1\n while True:\n fname = self.get_series_address(i)\n if not os.path.isfile(fname):\n break\n out += os.path.getsize(fname)\n i += 1\n self.change_position(*pos)\n return out\n\n @property\n def n_msg_recv(self):\n \"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n return 0\n if self.read_meth == 'read':\n return int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 
and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError:\n out = 0\n self.change_position(*pos)\n else:\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
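In `readline` mode, `n_msg_recv` counts the messages left in the file by scanning line-by-line until an empty read, then restores the starting position so the scan has no side effect. A standalone sketch of that loop (the function name and the `io.BytesIO` fixture are illustrative):

```python
import io

def count_messages(fd):
    pos = fd.tell()              # record_position() analogue
    count = 0
    line = fd.readline()
    while len(line) != 0:        # empty bytes => end of file
        count += 1
        line = fd.readline()
    fd.seek(pos)                 # change_position() analogue
    return count

fd = io.BytesIO(b'a\nb\nc\n')
assert count_messages(fd) == 3
assert fd.tell() == 0            # the count did not move the cursor
```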
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n\n @classmethod\n def is_installed(cls, language=None):\n \"\"\"Determine if the necessary libraries are installed for this\n communication class.\n\n Args:\n language (str, optional): Specific language that should be checked\n for compatibility. 
Defaults to None and all languages supported\n on the current platform will be checked.\n\n Returns:\n bool: Is the comm installed.\n\n \"\"\"\n return True\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n <function token>\n <function token>\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n\n @property\n def registry_key(self):\n \"\"\"str: String used to register the socket.\"\"\"\n return '%s_%s_%s' % (self.address, self.direction, self.uuid)\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. 
Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n\n @property\n def fd(self):\n \"\"\"Associated file identifier.\"\"\"\n return self._fd\n\n @property\n def remaining_bytes(self):\n \"\"\"int: Remaining bytes in the file.\"\"\"\n if self.is_closed or self.direction == 'send':\n return 0\n pos = self.record_position()\n try:\n curpos = self.fd.tell()\n self.fd.seek(0, os.SEEK_END)\n endpos = self.fd.tell()\n out = endpos - curpos\n except (ValueError, AttributeError):\n if self.is_open:\n raise\n out = 0\n if self.is_series:\n i = self._series_index + 1\n while True:\n fname = self.get_series_address(i)\n if not os.path.isfile(fname):\n break\n out += os.path.getsize(fname)\n i += 1\n self.change_position(*pos)\n return out\n\n @property\n def n_msg_recv(self):\n \"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n return 0\n if self.read_meth == 'read':\n return int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 
and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError:\n out = 0\n self.change_position(*pos)\n else:\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
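The step above shows FileComm's series mode: the address is a printf-style template, get_series_address fills in an integer index, and advance_in_series closes the current file and opens the next one. A minimal standalone sketch of that pattern (the 'out_%d.txt' template and SeriesWriter class are hypothetical, not part of FileComm):

import os

class SeriesWriter(object):
    def __init__(self, template):
        self.template = template  # printf-style address, e.g. 'out_%d.txt'
        self.index = 0
        self.fd = open(self.template % self.index, 'wb')

    def advance(self):
        # Close the current file and open the next index in the series.
        self.fd.close()
        self.index += 1
        self.fd = open(self.template % self.index, 'wb')

    def send(self, msg):
        self.fd.write(msg)
        self.fd.flush()
        self.advance()  # one message per file, as in is_series sends

w = SeriesWriter('out_%d.txt')  # hypothetical file names
w.send(b'first message')
w.send(b'second message')
w.fd.close()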
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n <function token>\n <function token>\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n\n @property\n def registry_key(self):\n \"\"\"str: String used to register the socket.\"\"\"\n return '%s_%s_%s' % (self.address, self.direction, self.uuid)\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n 
file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n\n @property\n def fd(self):\n \"\"\"Associated file identifier.\"\"\"\n return self._fd\n\n @property\n def remaining_bytes(self):\n \"\"\"int: 
Remaining bytes in the file.\"\"\"\n if self.is_closed or self.direction == 'send':\n return 0\n pos = self.record_position()\n try:\n curpos = self.fd.tell()\n self.fd.seek(0, os.SEEK_END)\n endpos = self.fd.tell()\n out = endpos - curpos\n except (ValueError, AttributeError):\n if self.is_open:\n raise\n out = 0\n if self.is_series:\n i = self._series_index + 1\n while True:\n fname = self.get_series_address(i)\n if not os.path.isfile(fname):\n break\n out += os.path.getsize(fname)\n i += 1\n self.change_position(*pos)\n return out\n\n @property\n def n_msg_recv(self):\n \"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n return 0\n if self.read_meth == 'read':\n return int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError:\n out = 0\n self.change_position(*pos)\n else:\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
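record_position and change_position in the step above pair a tell() offset with the series index so destructive scans can be undone. A standalone sketch of the same bookkeeping on a plain file object (scratch file name is hypothetical):

import os

fd = open('pos_demo.txt', 'w+')   # hypothetical scratch file
fd.write('line one\nline two\n')
fd.seek(0)

rec_pos = fd.tell()               # record_position()
fd.read()                         # destructive scan to EOF
fd.seek(rec_pos)                  # change_position(rec_pos)
assert fd.readline() == 'line one\n'
fd.close()
os.remove('pos_demo.txt')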
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n <function token>\n <function token>\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n <function token>\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be 
moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n\n @property\n def fd(self):\n \"\"\"Associated file identifier.\"\"\"\n return self._fd\n\n @property\n def remaining_bytes(self):\n \"\"\"int: Remaining bytes in the file.\"\"\"\n if self.is_closed or self.direction == 'send':\n return 0\n pos = self.record_position()\n try:\n curpos 
= self.fd.tell()\n self.fd.seek(0, os.SEEK_END)\n endpos = self.fd.tell()\n out = endpos - curpos\n except (ValueError, AttributeError):\n if self.is_open:\n raise\n out = 0\n if self.is_series:\n i = self._series_index + 1\n while True:\n fname = self.get_series_address(i)\n if not os.path.isfile(fname):\n break\n out += os.path.getsize(fname)\n i += 1\n self.change_position(*pos)\n return out\n\n @property\n def n_msg_recv(self):\n \"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n return 0\n if self.read_meth == 'read':\n return int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError:\n out = 0\n self.change_position(*pos)\n else:\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
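The remaining_bytes property computes the distance from the current offset to end-of-file with two tell() calls around a seek to os.SEEK_END, then restores the offset. A minimal sketch of just that computation (file name is hypothetical):

import os

def remaining_bytes(fd):
    curpos = fd.tell()
    fd.seek(0, os.SEEK_END)
    endpos = fd.tell()
    fd.seek(curpos)            # restore the reader's position
    return endpos - curpos

with open('rem_demo.bin', 'wb') as f:   # hypothetical file
    f.write(b'0123456789')
with open('rem_demo.bin', 'rb') as f:
    f.read(4)
    assert remaining_bytes(f) == 6
os.remove('rem_demo.bin')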
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n <function token>\n <function token>\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n <function token>\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be 
moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n\n @property\n def fd(self):\n \"\"\"Associated file identifier.\"\"\"\n return self._fd\n <function token>\n\n @property\n def n_msg_recv(self):\n \"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n return 0\n if self.read_meth == 'read':\n return 
int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError:\n out = 0\n self.change_position(*pos)\n else:\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
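n_msg_recv in the step above counts messages without consuming them: with read_meth == 'readline' each line is one message, and the scan is bracketed by a saved and restored position. A standalone sketch of that nondestructive count (helper and file names are hypothetical):

import os

def count_lines_nondestructive(fd):
    pos = fd.tell()            # save position, as record_position does
    n = 0
    line = fd.readline()
    while line:
        n += 1
        line = fd.readline()
    fd.seek(pos)               # restore, as change_position does
    return n

with open('count_demo.txt', 'w+') as f:  # hypothetical file
    f.write('a\nb\nc\n')
    f.seek(0)
    assert count_lines_nondestructive(f) == 3
    assert f.tell() == 0                 # position was restored
os.remove('count_demo.txt')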
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n <function token>\n <function token>\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n <function token>\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be 
moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n <function token>\n <function token>\n\n @property\n def n_msg_recv(self):\n \"\"\"int: The number of messages in the file.\"\"\"\n if self.is_closed:\n return 0\n if self.read_meth == 'read':\n return int(self.remaining_bytes > 0)\n elif self.read_meth == 'readline':\n pos = 
self.record_position()\n try:\n out = 0\n flag, msg = self._recv()\n while len(msg) != 0 and msg != self.eof_msg:\n out += 1\n flag, msg = self._recv()\n except ValueError:\n out = 0\n self.change_position(*pos)\n else:\n self.error('Unsupported read_meth: %s', self.read_meth)\n out = 0\n return out\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
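_recv in the step above normalizes newlines on the way in: whatever the platform wrote to disk is rewritten to the comm's configured newline before the message reaches the caller. A sketch of that substitution (the Windows-style input is an assumed example):

platform_newline = b'\r\n'   # assumed Windows-style on-disk newline
newline = b'\n'              # the comm's configured newline

raw = b'row one\r\nrow two\r\n'
normalized = raw.replace(platform_newline, newline)
assert normalized == b'row one\nrow two\n'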
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n <function token>\n <function token>\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n <function token>\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be 
moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n <function token>\n <function token>\n <function token>\n\n def on_send_eof(self):\n \"\"\"Close file when EOF to be sent.\n\n Returns:\n bool: False so that message not sent.\n\n \"\"\"\n flag, msg_s = super(FileComm, self).on_send_eof()\n try:\n self.fd.flush()\n except 
(AttributeError, ValueError):\n if self.is_open:\n raise\n return flag, msg_s\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
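The _send path above supports an 'ow' (overwrite) append mode: the message is written at the current offset, the file is truncated at the write position so stale trailing bytes are dropped, and the buffer is flushed. A standalone sketch (file name is hypothetical):

import os

with open('ow_demo.txt', 'w+') as f:   # hypothetical file
    f.write('old old old')
    f.seek(0)
    f.write('new')
    f.truncate()    # discard everything past the new content
    f.flush()
with open('ow_demo.txt') as f:
    assert f.read() == 'new'
os.remove('ow_demo.txt')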
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n\n def _init_before_open(self, read_meth='read', open_as_binary=True, **kwargs\n ):\n \"\"\"Get absolute path and set attributes.\"\"\"\n super(FileComm, self)._init_before_open(**kwargs)\n if not hasattr(self, '_fd'):\n self._fd = None\n if read_meth not in ['read', 'readline']:\n raise ValueError(\"read_meth '%s' not supported.\" % read_meth)\n self.read_meth = read_meth\n self.platform_newline = platform._newline\n if self.in_temp:\n self.address = os.path.join(tempfile.gettempdir(), self.address)\n self.address = os.path.abspath(self.address)\n self.open_as_binary = open_as_binary\n self._series_index = 0\n if self.open_as_binary:\n func_conv = backwards.as_bytes\n else:\n func_conv = backwards.as_unicode\n for k in self._attr_conv:\n v = getattr(self, k)\n if v is not None:\n setattr(self, k, func_conv(v))\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n <function token>\n <function token>\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n <function token>\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be 
moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. Defaults to None and call will advance to\n the next file in the series.\n\n Returns:\n bool: True if the file was advanced in the series, False otherwise.\n\n \"\"\"\n out = False\n if self.is_series:\n if series_index is None:\n series_index = self._series_index + 1\n if self._series_index != series_index:\n if self.direction == 'send' or os.path.isfile(self.\n get_series_address(series_index)):\n self._file_close()\n self._series_index = series_index\n self._open()\n out = True\n self.debug('Advanced to %d', series_index)\n return out\n\n def get_series_address(self, index=None):\n \"\"\"Get the address of a file in the series.\n\n Args:\n index (int, optional): Index in series to get address for.\n Defaults to None and the current index is used.\n\n Returns:\n str: Address for the file in the series.\n\n \"\"\"\n if index is None:\n index = self._series_index\n return self.address % index\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n\n def _open(self):\n address = self.current_address\n if self.fd is None:\n if not os.path.isfile(address) and self.wait_for_creation > 0:\n T = self.start_timeout(self.wait_for_creation)\n while not T.is_out and not os.path.isfile(address):\n self.sleep()\n self.stop_timeout()\n self._fd = open(address, self.open_mode)\n T = self.start_timeout()\n while not T.is_out and not self.is_open:\n self.sleep()\n self.stop_timeout()\n if self.append == 'ow':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n\n def remove_file(self):\n \"\"\"Remove the file.\"\"\"\n assert self.is_closed\n if self.is_series:\n i = 0\n while True:\n address = self.get_series_address(i)\n if not os.path.isfile(address):\n break\n os.remove(address)\n i += 1\n elif os.path.isfile(self.address):\n os.remove(self.address)\n\n @property\n def is_open(self):\n \"\"\"bool: True if the connection is open.\"\"\"\n try:\n return self.fd is not None and not self.fd.closed\n except AttributeError:\n if self.fd is not None:\n raise\n return False\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != 
self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
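Both _recv and _init_before_open above hinge on the read_meth dispatch: 'read' treats the whole file as one message, 'readline' treats each line as a message, and any other value is rejected up front. A minimal sketch of that dispatch (the read_message helper is hypothetical):

def read_message(fd, read_meth='read'):
    if read_meth not in ('read', 'readline'):
        raise ValueError("read_meth '%s' not supported." % read_meth)
    if read_meth == 'read':
        return fd.read()       # whole remaining file as one message
    return fd.readline()       # one line per message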
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('close_on_eof_send', True)\n return super(FileComm, self).__init__(*args, **kwargs)\n <function token>\n\n @classmethod\n def get_testing_options(cls, read_meth='read', open_as_binary=True, **\n kwargs):\n \"\"\"Method to return a dictionary of testing options for this class.\n\n Returns:\n dict: Dictionary of variables to use for testing. Key/value pairs:\n kwargs (dict): Keyword arguments for comms tested with the\n provided content.\n send (list): List of objects to send to test file.\n recv (list): List of objects that will be received from a test\n file that was sent the messages in 'send'.\n contents (bytes): Bytes contents of test file created by sending\n the messages in 'send'.\n\n \"\"\"\n out = super(FileComm, cls).get_testing_options(**kwargs)\n out['kwargs']['read_meth'] = read_meth\n out['kwargs']['open_as_binary'] = open_as_binary\n if read_meth == 'read' and isinstance(out['recv'][0], backwards.\n bytes_type):\n out['recv'] = [b''.join(out['recv'])]\n if not open_as_binary:\n out['contents'] = out['contents'].replace(backwards.match_stype\n (out['contents'], '\\n'), backwards.match_stype(out[\n 'contents'], platform._newline))\n return out\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n\n @classmethod\n def close_registry_entry(cls, value):\n \"\"\"Close a registry entry.\"\"\"\n out = False\n if not value.closed:\n value.close()\n out = True\n return out\n <function token>\n <function token>\n\n def opp_comm_kwargs(self):\n \"\"\"Get keyword arguments to initialize communication with opposite\n comm object.\n\n Returns:\n dict: Keyword arguments for opposite comm object.\n\n \"\"\"\n kwargs = super(FileComm, self).opp_comm_kwargs()\n kwargs['newline'] = self.newline\n kwargs['open_as_binary'] = self.open_as_binary\n kwargs['is_series'] = self.is_series\n return kwargs\n <function token>\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n\n def change_position(self, file_pos, series_index=None):\n \"\"\"Change the position in the file/series.\n\n Args:\n file_pos (int): Position that should be moved to in the file.\n series_index (int, optinal): Index of the file in the series that\n should be moved to. Defaults to None and will be set to the\n current series index.\n\n \"\"\"\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n\n def advance_in_series(self, series_index=None):\n \"\"\"Advance to a certain file in a series.\n\n Args:\n series_index (int, optional): Index of file in the series that\n should be moved to. 
[Subsequent entries of the 'steps' list repeat the FileComm source above with progressively more definitions replaced by <import token>, <docstring token>, <assignment token>, and <function token> placeholders; by the last entry in this span only underlying_comm_class, record_position, advance_in_file, get_series_address, current_address, open, _close, _send, _recv, and purge remain as code.]
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n <function token>\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n <function token>\n <function token>\n\n @property\n def current_address(self):\n \"\"\"str: Address of file currently being used.\"\"\"\n if self.is_series:\n address = self.get_series_address()\n else:\n address = self.address\n return address\n <function token>\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def record_position(self):\n \"\"\"Record the current position in the file/series.\"\"\"\n _rec_pos = self.fd.tell()\n _rec_ind = self._series_index\n return _rec_pos, _rec_ind\n <function token>\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def advance_in_file(self, file_pos):\n \"\"\"Advance to a certain position in the current file.\n\n Args:\n file_pos (int): Position that should be moved to in the current.\n file.\n\n \"\"\"\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n\n def purge(self):\n \"\"\"Purge all messages from the comm.\"\"\"\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n",
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n\n def _close(self, *args, **kwargs):\n \"\"\"Close the file.\"\"\"\n self._file_close()\n self.unregister_comm(self.registry_key)\n super(FileComm, self)._close(*args, **kwargs)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n <function token>\n",
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _send(self, msg):\n \"\"\"Write message to a file.\n\n Args:\n msg (bytes, str): Data to write to the file.\n\n Returns:\n bool: Success or failure of writing to the file.\n\n \"\"\"\n try:\n if msg != self.eof_msg:\n if not self.open_as_binary:\n msg = backwards.as_unicode(msg)\n self.fd.write(msg)\n if self.append == 'ow':\n self.fd.truncate()\n self.fd.flush()\n except (AttributeError, ValueError):\n if self.is_open:\n raise\n return False\n if msg != self.eof_msg and self.is_series:\n self.advance_in_series()\n self.debug('Advanced to %d', self._series_index)\n return True\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n <function token>\n",
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _recv(self, timeout=0):\n \"\"\"Reads message from a file.\n\n Args:\n timeout (float, optional): Time in seconds to wait for a message.\n Defaults to self.recv_timeout. Unused.\n\n Returns:\n tuple (bool, str): Success or failure of reading from the file and\n the read messages as bytes.\n\n \"\"\"\n flag = True\n try:\n if self.read_meth == 'read':\n out = self.fd.read()\n elif self.read_meth == 'readline':\n out = self.fd.readline()\n except BaseException:\n out = ''\n if len(out) == 0:\n if self.advance_in_series():\n self.debug('Advanced to %d', self._series_index)\n flag, out = self._recv()\n else:\n out = self.eof_msg\n else:\n out = out.replace(self.platform_newline, self.newline)\n if not self.open_as_binary:\n out = backwards.as_bytes(out)\n return flag, out\n <function token>\n",
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def open(self):\n \"\"\"Open the file.\"\"\"\n super(FileComm, self).open()\n self._open()\n self.register_comm(self.registry_key, self.fd)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def underlying_comm_class(self):\n \"\"\"str: Name of underlying communication class.\"\"\"\n return 'FileComm'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\n@register_component\nclass FileComm(CommBase.CommBase):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
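The `_recv` method recorded in the steps above alternates between whole-file `read` and line-oriented `readline`, mapping an empty result to an EOF marker. A minimal standalone sketch of that receive pattern (no yggdrasil dependencies; `EOF_MSG` is a placeholder standing in for the class's `eof_msg` attribute):

import io

EOF_MSG = b'EOF'  # placeholder for FileComm.eof_msg

def recv_from(fd, read_meth='readline'):
    # Mimic FileComm._recv: one read per call; an empty result signals EOF.
    out = fd.read() if read_meth == 'read' else fd.readline()
    return EOF_MSG if len(out) == 0 else out

fd = io.BytesIO(b'line one\nline two\n')
while True:
    msg = recv_from(fd)
    if msg == EOF_MSG:
        break
    print(msg)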
98,472 |
23071296f68e2fee8896ace89d3bd1e4c65ac923
|
# CRASH TESTS: instantiate GenericDecap with boundary byte counts.
gd0 = GenericDecap(bytes=0)
CRASH_TEST_INPUTS.append([gd0, 1, 1])
gd1 = GenericDecap(bytes=23)
CRASH_TEST_INPUTS.append([gd1, 1, 1])
# OUTPUT TESTS
# Test stripping the 14-byte Ethernet header off a frame.
gd2 = GenericDecap(bytes=14)
eth = scapy.Ether(src='de:ad:be:ef:12:34', dst='12:34:de:ad:be:ef')
ip = scapy.IP(src="1.2.3.4", dst="2.3.4.5", ttl=98)
udp = scapy.UDP(sport=10001, dport=10002)
payload = 'helloworldhelloworldhelloworld'
eth_packet_in = eth/ip/udp/payload
eth_packet_out = ip/udp/payload
OUTPUT_TEST_INPUTS.append([gd2, 1, 1,
[{'input_port': 0, 'input_packet': eth_packet_in,
'output_port': 0, 'output_packet': eth_packet_out}]])
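As a quick sanity check independent of the test harness (plain scapy only; nothing below is harness API), a 14-byte decap of the serialized Ethernet frame should reproduce the expected output packet byte for byte:

# Strip the 14-byte Ethernet header manually and compare against the
# packet the output test above expects.
assert bytes(eth_packet_in)[14:] == bytes(eth_packet_out)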
|
[
"#CRASH TEST\ngd0 = GenericDecap(bytes=0)\nCRASH_TEST_INPUTS.append([gd0, 1, 1])\n\ngd1 = GenericDecap(bytes=23)\nCRASH_TEST_INPUTS.append([gd1, 1, 1])\n\n#OUTPUT TESTS\n\n#test strip off ether\ngd2 = GenericDecap(bytes=14)\neth = scapy.Ether(src='de:ad:be:ef:12:34', dst='12:34:de:ad:be:ef')\nip = scapy.IP(src=\"1.2.3.4\", dst=\"2.3.4.5\", ttl=98)\nudp = scapy.UDP(sport=10001, dport=10002)\npayload = 'helloworldhelloworldhelloworld'\n\neth_packet_in = eth/ip/udp/payload\neth_packet_out = ip/udp/payload\n\nOUTPUT_TEST_INPUTS.append([gd2, 1, 1, \n\t[{'input_port': 0, 'input_packet': eth_packet_in,\n\t'output_port': 0, 'output_packet': eth_packet_out}]])\n",
"gd0 = GenericDecap(bytes=0)\nCRASH_TEST_INPUTS.append([gd0, 1, 1])\ngd1 = GenericDecap(bytes=23)\nCRASH_TEST_INPUTS.append([gd1, 1, 1])\ngd2 = GenericDecap(bytes=14)\neth = scapy.Ether(src='de:ad:be:ef:12:34', dst='12:34:de:ad:be:ef')\nip = scapy.IP(src='1.2.3.4', dst='2.3.4.5', ttl=98)\nudp = scapy.UDP(sport=10001, dport=10002)\npayload = 'helloworldhelloworldhelloworld'\neth_packet_in = eth / ip / udp / payload\neth_packet_out = ip / udp / payload\nOUTPUT_TEST_INPUTS.append([gd2, 1, 1, [{'input_port': 0, 'input_packet':\n eth_packet_in, 'output_port': 0, 'output_packet': eth_packet_out}]])\n",
"<assignment token>\nCRASH_TEST_INPUTS.append([gd0, 1, 1])\n<assignment token>\nCRASH_TEST_INPUTS.append([gd1, 1, 1])\n<assignment token>\nOUTPUT_TEST_INPUTS.append([gd2, 1, 1, [{'input_port': 0, 'input_packet':\n eth_packet_in, 'output_port': 0, 'output_packet': eth_packet_out}]])\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,473 |
c3a7eb4c32ba4ac18fd62112cae3c6a1566c6f7e
|
import mysql.connector
from indemo import mdbConfig
mdb = mysql.connector.connect(**mdbConfig)
mycursor = mdb.cursor()
class Teachers:
def __init__(self, name, address, subject, clas):
self.mdb = mysql.connector.connect(**mdbConfig)
self.name = name
self.address = address
self.subject = subject
self.clas = clas
self.mycursor = self.mdb.cursor()
print("You are connected to the database")
print(self.mdb)
def __del__(self):
self.mdb.close()
def employ(self):
sql = "INSERT INTO teachers(name, address, subject, class) VALUES (%s, %s, %s, %s)"
values = (self.name, self.address, self.subject, self.clas)
self.mycursor.execute(sql, values)
self.mdb.commit()
print("Added to database")
print('')
print("CONGRATULATIONS TO THE NEWLY EMPLOYED TEACHER")
def view_all_teachers():
mycursor.execute("SELECT * FROM teachers")
rows = mycursor.fetchall()
for row in rows:
print(row)
def sack():
mycursor = mdb.cursor()
teacher = input("Enter teacher's name: ")
sql = "DELETE FROM teachers WHERE name = '{}'".format(teacher)
mycursor.execute(sql)
mdb.commit()
print('')
print("TEACHER'S RECORD DELETED FROM DATABASE")
def employ_teacher():
name = input("Enter teacher's name: ")
address = input("Enter teacher's address: ")
subject = input("Enter teacher's subject: ")
clas = input("Enter teacher's class: ")
    teacher = Teachers(name, address, subject, clas)
    teacher.employ()
def update_teacher_info():
mycursor.execute("SELECT id, name, address, subject, class FROM teachers")
rows = mycursor.fetchall()
teacher = str(input("Enter teacher's name as it appears in the portal: "))
mycursor.execute("SELECT id, name, address, subject, class FROM teachers WHERE name = '{}'".format(teacher))
rows = mycursor.fetchall()
ind = len(rows)
if ind < 1:
print("No teacher with the name")
print("Make sure you enter the name as it is in the database \n")
update_teacher_info()
    elif ind >= 1:
        row = rows[0]  # (id, name, address, subject, class)
edit_name = str(input("Do you want to edit name? Type 'YES' to edit or 'NO' to continue: "))
edit_name = edit_name.upper()
if edit_name == 'YES':
edit_name = str(input("Enter the correct name: "))
elif edit_name == 'NO':
            edit_name = row[1]
edit_address = str(input("Do you want to edit address? Type 'YES' to edit or 'NO' to continue: "))
edit_address = edit_address.upper()
if edit_address == 'YES':
edit_address = str(input("Enter the correct address: "))
elif edit_address == 'NO':
            edit_address = row[2]
edit_subject = str(input("Do you want to edit subject? Type 'YES' to edit or 'NO' to continue: "))
edit_subject = edit_subject.upper()
if edit_subject == 'YES':
edit_subject = str(input("Enter the correct subject: "))
elif edit_subject == 'NO':
            edit_subject = row[3]
edit_class = str(input("Do you want to edit class? Type 'YES' to edit or 'NO' to continue: "))
edit_class = edit_class.upper()
if edit_class == 'YES':
edit_class = str(input("Enter the correct class: "))
elif edit_class == 'NO':
            edit_class = row[4]
sql = "UPDATE students SET name = '{}', address = '{}', subject = '{}', class = '{}' WHERE id = {}".format(
edit_name, edit_address, edit_subject, edit_class, rows[0])
mycursor.execute(sql)
mdb.commit()
print("")
print("TEACHER'S DATABASE EDITED SUCCESSFULLY")
else:
print("Invalid input \n")
update_teacher_info()
# update_teacher_info()
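A minimal driver sketch for the module above (hypothetical usage, assuming `mdbConfig` holds credentials for a reachable MySQL server that already contains a `teachers` table):

# Hypothetical entry point; not part of the original module.
if __name__ == '__main__':
    employ_teacher()      # prompt for details and insert a row
    view_all_teachers()   # print every teacher record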
|
[
"import mysql.connector\r\nfrom indemo import mdbConfig\r\n\r\nmdb = mysql.connector.connect(**mdbConfig)\r\n\r\nmycursor = mdb.cursor()\r\n\r\n\r\nclass Teachers:\r\n def __init__(self, name, address, subject, clas):\r\n self.mdb = mysql.connector.connect(**mdbConfig)\r\n self.name = name\r\n self.address = address\r\n self.subject = subject\r\n self.clas = clas\r\n self.mycursor = self.mdb.cursor()\r\n print(\"You are connected to the database\")\r\n print(self.mdb)\r\n\r\n def __del__(self):\r\n self.mdb.close()\r\n\r\n def employ(self):\r\n sql = \"INSERT INTO teachers(name, address, subject, class) VALUES (%s, %s, %s, %s)\"\r\n values = (self.name, self.address, self.subject, self.clas)\r\n self.mycursor.execute(sql, values)\r\n self.mdb.commit()\r\n print(\"Added to database\")\r\n print('')\r\n print(\"CONGRATULATIONS TO THE NEWLY EMPLOYED TEACHER\")\r\n\r\n\r\ndef view_all_teachers():\r\n mycursor.execute(\"SELECT * FROM teachers\")\r\n rows = mycursor.fetchall()\r\n for row in rows:\r\n print(row)\r\n\r\n\r\ndef sack():\r\n mycursor = mdb.cursor()\r\n teacher = input(\"Enter teacher's name: \")\r\n sql = \"DELETE FROM teachers WHERE name = '{}'\".format(teacher)\r\n mycursor.execute(sql)\r\n mdb.commit()\r\n print('')\r\n print(\"TEACHER'S RECORD DELETED FROM DATABASE\")\r\n\r\n\r\ndef employ_teacher():\r\n name = input(\"Enter teacher's name: \")\r\n address = input(\"Enter teacher's address: \")\r\n subject = input(\"Enter teacher's subject: \")\r\n clas = input(\"Enter teacher's class: \")\r\n\r\n name = Teachers(name, address, subject, clas)\r\n name.employ()\r\n\r\n\r\ndef update_teacher_info():\r\n mycursor.execute(\"SELECT id, name, address, subject, class FROM teachers\")\r\n rows = mycursor.fetchall()\r\n teacher = str(input(\"Enter teacher's name as it appears in the portal: \"))\r\n mycursor.execute(\"SELECT id, name, address, subject, class FROM teachers WHERE name = '{}'\".format(teacher))\r\n rows = mycursor.fetchall()\r\n ind = len(rows)\r\n if ind < 1:\r\n print(\"No teacher with the name\")\r\n print(\"Make sure you enter the name as it is in the database \\n\")\r\n update_teacher_info()\r\n elif ind >= 1:\r\n edit_name = str(input(\"Do you want to edit name? Type 'YES' to edit or 'NO' to continue: \"))\r\n edit_name = edit_name.upper()\r\n if edit_name == 'YES':\r\n edit_name = str(input(\"Enter the correct name: \"))\r\n elif edit_name == 'NO':\r\n edit_name = rows[1]\r\n edit_address = str(input(\"Do you want to edit address? Type 'YES' to edit or 'NO' to continue: \"))\r\n edit_address = edit_address.upper()\r\n if edit_address == 'YES':\r\n edit_address = str(input(\"Enter the correct address: \"))\r\n elif edit_address == 'NO':\r\n edit_address = rows[2]\r\n edit_subject = str(input(\"Do you want to edit subject? Type 'YES' to edit or 'NO' to continue: \"))\r\n edit_subject = edit_subject.upper()\r\n if edit_subject == 'YES':\r\n edit_subject = str(input(\"Enter the correct subject: \"))\r\n elif edit_subject == 'NO':\r\n edit_subject = rows[3]\r\n edit_class = str(input(\"Do you want to edit class? 
Type 'YES' to edit or 'NO' to continue: \"))\r\n edit_class = edit_class.upper()\r\n if edit_class == 'YES':\r\n edit_class = str(input(\"Enter the correct class: \"))\r\n elif edit_class == 'NO':\r\n edit_class = rows[4]\r\n\r\n sql = \"UPDATE students SET name = '{}', address = '{}', subject = '{}', class = '{}' WHERE id = {}\".format(\r\n edit_name, edit_address, edit_subject, edit_class, rows[0])\r\n\r\n mycursor.execute(sql)\r\n mdb.commit()\r\n print(\"\")\r\n print(\"TEACHER'S DATABASE EDITED SUCCESSFULLY\")\r\n else:\r\n print(\"Invalid input \\n\")\r\n update_teacher_info()\r\n\r\n\r\n# update_teacher_info()\r\n",
"import mysql.connector\nfrom indemo import mdbConfig\nmdb = mysql.connector.connect(**mdbConfig)\nmycursor = mdb.cursor()\n\n\nclass Teachers:\n\n def __init__(self, name, address, subject, clas):\n self.mdb = mysql.connector.connect(**mdbConfig)\n self.name = name\n self.address = address\n self.subject = subject\n self.clas = clas\n self.mycursor = self.mdb.cursor()\n print('You are connected to the database')\n print(self.mdb)\n\n def __del__(self):\n self.mdb.close()\n\n def employ(self):\n sql = (\n 'INSERT INTO teachers(name, address, subject, class) VALUES (%s, %s, %s, %s)'\n )\n values = self.name, self.address, self.subject, self.clas\n self.mycursor.execute(sql, values)\n self.mdb.commit()\n print('Added to database')\n print('')\n print('CONGRATULATIONS TO THE NEWLY EMPLOYED TEACHER')\n\n\ndef view_all_teachers():\n mycursor.execute('SELECT * FROM teachers')\n rows = mycursor.fetchall()\n for row in rows:\n print(row)\n\n\ndef sack():\n mycursor = mdb.cursor()\n teacher = input(\"Enter teacher's name: \")\n sql = \"DELETE FROM teachers WHERE name = '{}'\".format(teacher)\n mycursor.execute(sql)\n mdb.commit()\n print('')\n print(\"TEACHER'S RECORD DELETED FROM DATABASE\")\n\n\ndef employ_teacher():\n name = input(\"Enter teacher's name: \")\n address = input(\"Enter teacher's address: \")\n subject = input(\"Enter teacher's subject: \")\n clas = input(\"Enter teacher's class: \")\n name = Teachers(name, address, subject, clas)\n name.employ()\n\n\ndef update_teacher_info():\n mycursor.execute('SELECT id, name, address, subject, class FROM teachers')\n rows = mycursor.fetchall()\n teacher = str(input(\"Enter teacher's name as it appears in the portal: \"))\n mycursor.execute(\n \"SELECT id, name, address, subject, class FROM teachers WHERE name = '{}'\"\n .format(teacher))\n rows = mycursor.fetchall()\n ind = len(rows)\n if ind < 1:\n print('No teacher with the name')\n print('Make sure you enter the name as it is in the database \\n')\n update_teacher_info()\n elif ind >= 1:\n edit_name = str(input(\n \"Do you want to edit name? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_name = edit_name.upper()\n if edit_name == 'YES':\n edit_name = str(input('Enter the correct name: '))\n elif edit_name == 'NO':\n edit_name = rows[1]\n edit_address = str(input(\n \"Do you want to edit address? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_address = edit_address.upper()\n if edit_address == 'YES':\n edit_address = str(input('Enter the correct address: '))\n elif edit_address == 'NO':\n edit_address = rows[2]\n edit_subject = str(input(\n \"Do you want to edit subject? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_subject = edit_subject.upper()\n if edit_subject == 'YES':\n edit_subject = str(input('Enter the correct subject: '))\n elif edit_subject == 'NO':\n edit_subject = rows[3]\n edit_class = str(input(\n \"Do you want to edit class? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_class = edit_class.upper()\n if edit_class == 'YES':\n edit_class = str(input('Enter the correct class: '))\n elif edit_class == 'NO':\n edit_class = rows[4]\n sql = (\n \"UPDATE students SET name = '{}', address = '{}', subject = '{}', class = '{}' WHERE id = {}\"\n .format(edit_name, edit_address, edit_subject, edit_class, rows[0])\n )\n mycursor.execute(sql)\n mdb.commit()\n print('')\n print(\"TEACHER'S DATABASE EDITED SUCCESSFULLY\")\n else:\n print('Invalid input \\n')\n update_teacher_info()\n",
"<import token>\nmdb = mysql.connector.connect(**mdbConfig)\nmycursor = mdb.cursor()\n\n\nclass Teachers:\n\n def __init__(self, name, address, subject, clas):\n self.mdb = mysql.connector.connect(**mdbConfig)\n self.name = name\n self.address = address\n self.subject = subject\n self.clas = clas\n self.mycursor = self.mdb.cursor()\n print('You are connected to the database')\n print(self.mdb)\n\n def __del__(self):\n self.mdb.close()\n\n def employ(self):\n sql = (\n 'INSERT INTO teachers(name, address, subject, class) VALUES (%s, %s, %s, %s)'\n )\n values = self.name, self.address, self.subject, self.clas\n self.mycursor.execute(sql, values)\n self.mdb.commit()\n print('Added to database')\n print('')\n print('CONGRATULATIONS TO THE NEWLY EMPLOYED TEACHER')\n\n\ndef view_all_teachers():\n mycursor.execute('SELECT * FROM teachers')\n rows = mycursor.fetchall()\n for row in rows:\n print(row)\n\n\ndef sack():\n mycursor = mdb.cursor()\n teacher = input(\"Enter teacher's name: \")\n sql = \"DELETE FROM teachers WHERE name = '{}'\".format(teacher)\n mycursor.execute(sql)\n mdb.commit()\n print('')\n print(\"TEACHER'S RECORD DELETED FROM DATABASE\")\n\n\ndef employ_teacher():\n name = input(\"Enter teacher's name: \")\n address = input(\"Enter teacher's address: \")\n subject = input(\"Enter teacher's subject: \")\n clas = input(\"Enter teacher's class: \")\n name = Teachers(name, address, subject, clas)\n name.employ()\n\n\ndef update_teacher_info():\n mycursor.execute('SELECT id, name, address, subject, class FROM teachers')\n rows = mycursor.fetchall()\n teacher = str(input(\"Enter teacher's name as it appears in the portal: \"))\n mycursor.execute(\n \"SELECT id, name, address, subject, class FROM teachers WHERE name = '{}'\"\n .format(teacher))\n rows = mycursor.fetchall()\n ind = len(rows)\n if ind < 1:\n print('No teacher with the name')\n print('Make sure you enter the name as it is in the database \\n')\n update_teacher_info()\n elif ind >= 1:\n edit_name = str(input(\n \"Do you want to edit name? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_name = edit_name.upper()\n if edit_name == 'YES':\n edit_name = str(input('Enter the correct name: '))\n elif edit_name == 'NO':\n edit_name = rows[1]\n edit_address = str(input(\n \"Do you want to edit address? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_address = edit_address.upper()\n if edit_address == 'YES':\n edit_address = str(input('Enter the correct address: '))\n elif edit_address == 'NO':\n edit_address = rows[2]\n edit_subject = str(input(\n \"Do you want to edit subject? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_subject = edit_subject.upper()\n if edit_subject == 'YES':\n edit_subject = str(input('Enter the correct subject: '))\n elif edit_subject == 'NO':\n edit_subject = rows[3]\n edit_class = str(input(\n \"Do you want to edit class? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_class = edit_class.upper()\n if edit_class == 'YES':\n edit_class = str(input('Enter the correct class: '))\n elif edit_class == 'NO':\n edit_class = rows[4]\n sql = (\n \"UPDATE students SET name = '{}', address = '{}', subject = '{}', class = '{}' WHERE id = {}\"\n .format(edit_name, edit_address, edit_subject, edit_class, rows[0])\n )\n mycursor.execute(sql)\n mdb.commit()\n print('')\n print(\"TEACHER'S DATABASE EDITED SUCCESSFULLY\")\n else:\n print('Invalid input \\n')\n update_teacher_info()\n",
"<import token>\n<assignment token>\n\n\nclass Teachers:\n\n def __init__(self, name, address, subject, clas):\n self.mdb = mysql.connector.connect(**mdbConfig)\n self.name = name\n self.address = address\n self.subject = subject\n self.clas = clas\n self.mycursor = self.mdb.cursor()\n print('You are connected to the database')\n print(self.mdb)\n\n def __del__(self):\n self.mdb.close()\n\n def employ(self):\n sql = (\n 'INSERT INTO teachers(name, address, subject, class) VALUES (%s, %s, %s, %s)'\n )\n values = self.name, self.address, self.subject, self.clas\n self.mycursor.execute(sql, values)\n self.mdb.commit()\n print('Added to database')\n print('')\n print('CONGRATULATIONS TO THE NEWLY EMPLOYED TEACHER')\n\n\ndef view_all_teachers():\n mycursor.execute('SELECT * FROM teachers')\n rows = mycursor.fetchall()\n for row in rows:\n print(row)\n\n\ndef sack():\n mycursor = mdb.cursor()\n teacher = input(\"Enter teacher's name: \")\n sql = \"DELETE FROM teachers WHERE name = '{}'\".format(teacher)\n mycursor.execute(sql)\n mdb.commit()\n print('')\n print(\"TEACHER'S RECORD DELETED FROM DATABASE\")\n\n\ndef employ_teacher():\n name = input(\"Enter teacher's name: \")\n address = input(\"Enter teacher's address: \")\n subject = input(\"Enter teacher's subject: \")\n clas = input(\"Enter teacher's class: \")\n name = Teachers(name, address, subject, clas)\n name.employ()\n\n\ndef update_teacher_info():\n mycursor.execute('SELECT id, name, address, subject, class FROM teachers')\n rows = mycursor.fetchall()\n teacher = str(input(\"Enter teacher's name as it appears in the portal: \"))\n mycursor.execute(\n \"SELECT id, name, address, subject, class FROM teachers WHERE name = '{}'\"\n .format(teacher))\n rows = mycursor.fetchall()\n ind = len(rows)\n if ind < 1:\n print('No teacher with the name')\n print('Make sure you enter the name as it is in the database \\n')\n update_teacher_info()\n elif ind >= 1:\n edit_name = str(input(\n \"Do you want to edit name? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_name = edit_name.upper()\n if edit_name == 'YES':\n edit_name = str(input('Enter the correct name: '))\n elif edit_name == 'NO':\n edit_name = rows[1]\n edit_address = str(input(\n \"Do you want to edit address? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_address = edit_address.upper()\n if edit_address == 'YES':\n edit_address = str(input('Enter the correct address: '))\n elif edit_address == 'NO':\n edit_address = rows[2]\n edit_subject = str(input(\n \"Do you want to edit subject? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_subject = edit_subject.upper()\n if edit_subject == 'YES':\n edit_subject = str(input('Enter the correct subject: '))\n elif edit_subject == 'NO':\n edit_subject = rows[3]\n edit_class = str(input(\n \"Do you want to edit class? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_class = edit_class.upper()\n if edit_class == 'YES':\n edit_class = str(input('Enter the correct class: '))\n elif edit_class == 'NO':\n edit_class = rows[4]\n sql = (\n \"UPDATE students SET name = '{}', address = '{}', subject = '{}', class = '{}' WHERE id = {}\"\n .format(edit_name, edit_address, edit_subject, edit_class, rows[0])\n )\n mycursor.execute(sql)\n mdb.commit()\n print('')\n print(\"TEACHER'S DATABASE EDITED SUCCESSFULLY\")\n else:\n print('Invalid input \\n')\n update_teacher_info()\n",
"<import token>\n<assignment token>\n\n\nclass Teachers:\n\n def __init__(self, name, address, subject, clas):\n self.mdb = mysql.connector.connect(**mdbConfig)\n self.name = name\n self.address = address\n self.subject = subject\n self.clas = clas\n self.mycursor = self.mdb.cursor()\n print('You are connected to the database')\n print(self.mdb)\n\n def __del__(self):\n self.mdb.close()\n\n def employ(self):\n sql = (\n 'INSERT INTO teachers(name, address, subject, class) VALUES (%s, %s, %s, %s)'\n )\n values = self.name, self.address, self.subject, self.clas\n self.mycursor.execute(sql, values)\n self.mdb.commit()\n print('Added to database')\n print('')\n print('CONGRATULATIONS TO THE NEWLY EMPLOYED TEACHER')\n\n\ndef view_all_teachers():\n mycursor.execute('SELECT * FROM teachers')\n rows = mycursor.fetchall()\n for row in rows:\n print(row)\n\n\n<function token>\n\n\ndef employ_teacher():\n name = input(\"Enter teacher's name: \")\n address = input(\"Enter teacher's address: \")\n subject = input(\"Enter teacher's subject: \")\n clas = input(\"Enter teacher's class: \")\n name = Teachers(name, address, subject, clas)\n name.employ()\n\n\ndef update_teacher_info():\n mycursor.execute('SELECT id, name, address, subject, class FROM teachers')\n rows = mycursor.fetchall()\n teacher = str(input(\"Enter teacher's name as it appears in the portal: \"))\n mycursor.execute(\n \"SELECT id, name, address, subject, class FROM teachers WHERE name = '{}'\"\n .format(teacher))\n rows = mycursor.fetchall()\n ind = len(rows)\n if ind < 1:\n print('No teacher with the name')\n print('Make sure you enter the name as it is in the database \\n')\n update_teacher_info()\n elif ind >= 1:\n edit_name = str(input(\n \"Do you want to edit name? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_name = edit_name.upper()\n if edit_name == 'YES':\n edit_name = str(input('Enter the correct name: '))\n elif edit_name == 'NO':\n edit_name = rows[1]\n edit_address = str(input(\n \"Do you want to edit address? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_address = edit_address.upper()\n if edit_address == 'YES':\n edit_address = str(input('Enter the correct address: '))\n elif edit_address == 'NO':\n edit_address = rows[2]\n edit_subject = str(input(\n \"Do you want to edit subject? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_subject = edit_subject.upper()\n if edit_subject == 'YES':\n edit_subject = str(input('Enter the correct subject: '))\n elif edit_subject == 'NO':\n edit_subject = rows[3]\n edit_class = str(input(\n \"Do you want to edit class? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_class = edit_class.upper()\n if edit_class == 'YES':\n edit_class = str(input('Enter the correct class: '))\n elif edit_class == 'NO':\n edit_class = rows[4]\n sql = (\n \"UPDATE students SET name = '{}', address = '{}', subject = '{}', class = '{}' WHERE id = {}\"\n .format(edit_name, edit_address, edit_subject, edit_class, rows[0])\n )\n mycursor.execute(sql)\n mdb.commit()\n print('')\n print(\"TEACHER'S DATABASE EDITED SUCCESSFULLY\")\n else:\n print('Invalid input \\n')\n update_teacher_info()\n",
"<import token>\n<assignment token>\n\n\nclass Teachers:\n\n def __init__(self, name, address, subject, clas):\n self.mdb = mysql.connector.connect(**mdbConfig)\n self.name = name\n self.address = address\n self.subject = subject\n self.clas = clas\n self.mycursor = self.mdb.cursor()\n print('You are connected to the database')\n print(self.mdb)\n\n def __del__(self):\n self.mdb.close()\n\n def employ(self):\n sql = (\n 'INSERT INTO teachers(name, address, subject, class) VALUES (%s, %s, %s, %s)'\n )\n values = self.name, self.address, self.subject, self.clas\n self.mycursor.execute(sql, values)\n self.mdb.commit()\n print('Added to database')\n print('')\n print('CONGRATULATIONS TO THE NEWLY EMPLOYED TEACHER')\n\n\ndef view_all_teachers():\n mycursor.execute('SELECT * FROM teachers')\n rows = mycursor.fetchall()\n for row in rows:\n print(row)\n\n\n<function token>\n<function token>\n\n\ndef update_teacher_info():\n mycursor.execute('SELECT id, name, address, subject, class FROM teachers')\n rows = mycursor.fetchall()\n teacher = str(input(\"Enter teacher's name as it appears in the portal: \"))\n mycursor.execute(\n \"SELECT id, name, address, subject, class FROM teachers WHERE name = '{}'\"\n .format(teacher))\n rows = mycursor.fetchall()\n ind = len(rows)\n if ind < 1:\n print('No teacher with the name')\n print('Make sure you enter the name as it is in the database \\n')\n update_teacher_info()\n elif ind >= 1:\n edit_name = str(input(\n \"Do you want to edit name? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_name = edit_name.upper()\n if edit_name == 'YES':\n edit_name = str(input('Enter the correct name: '))\n elif edit_name == 'NO':\n edit_name = rows[1]\n edit_address = str(input(\n \"Do you want to edit address? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_address = edit_address.upper()\n if edit_address == 'YES':\n edit_address = str(input('Enter the correct address: '))\n elif edit_address == 'NO':\n edit_address = rows[2]\n edit_subject = str(input(\n \"Do you want to edit subject? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_subject = edit_subject.upper()\n if edit_subject == 'YES':\n edit_subject = str(input('Enter the correct subject: '))\n elif edit_subject == 'NO':\n edit_subject = rows[3]\n edit_class = str(input(\n \"Do you want to edit class? Type 'YES' to edit or 'NO' to continue: \"\n ))\n edit_class = edit_class.upper()\n if edit_class == 'YES':\n edit_class = str(input('Enter the correct class: '))\n elif edit_class == 'NO':\n edit_class = rows[4]\n sql = (\n \"UPDATE students SET name = '{}', address = '{}', subject = '{}', class = '{}' WHERE id = {}\"\n .format(edit_name, edit_address, edit_subject, edit_class, rows[0])\n )\n mycursor.execute(sql)\n mdb.commit()\n print('')\n print(\"TEACHER'S DATABASE EDITED SUCCESSFULLY\")\n else:\n print('Invalid input \\n')\n update_teacher_info()\n",
"<import token>\n<assignment token>\n\n\nclass Teachers:\n\n def __init__(self, name, address, subject, clas):\n self.mdb = mysql.connector.connect(**mdbConfig)\n self.name = name\n self.address = address\n self.subject = subject\n self.clas = clas\n self.mycursor = self.mdb.cursor()\n print('You are connected to the database')\n print(self.mdb)\n\n def __del__(self):\n self.mdb.close()\n\n def employ(self):\n sql = (\n 'INSERT INTO teachers(name, address, subject, class) VALUES (%s, %s, %s, %s)'\n )\n values = self.name, self.address, self.subject, self.clas\n self.mycursor.execute(sql, values)\n self.mdb.commit()\n print('Added to database')\n print('')\n print('CONGRATULATIONS TO THE NEWLY EMPLOYED TEACHER')\n\n\ndef view_all_teachers():\n mycursor.execute('SELECT * FROM teachers')\n rows = mycursor.fetchall()\n for row in rows:\n print(row)\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n\n\nclass Teachers:\n\n def __init__(self, name, address, subject, clas):\n self.mdb = mysql.connector.connect(**mdbConfig)\n self.name = name\n self.address = address\n self.subject = subject\n self.clas = clas\n self.mycursor = self.mdb.cursor()\n print('You are connected to the database')\n print(self.mdb)\n\n def __del__(self):\n self.mdb.close()\n\n def employ(self):\n sql = (\n 'INSERT INTO teachers(name, address, subject, class) VALUES (%s, %s, %s, %s)'\n )\n values = self.name, self.address, self.subject, self.clas\n self.mycursor.execute(sql, values)\n self.mdb.commit()\n print('Added to database')\n print('')\n print('CONGRATULATIONS TO THE NEWLY EMPLOYED TEACHER')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n\n\nclass Teachers:\n\n def __init__(self, name, address, subject, clas):\n self.mdb = mysql.connector.connect(**mdbConfig)\n self.name = name\n self.address = address\n self.subject = subject\n self.clas = clas\n self.mycursor = self.mdb.cursor()\n print('You are connected to the database')\n print(self.mdb)\n\n def __del__(self):\n self.mdb.close()\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n\n\nclass Teachers:\n <function token>\n\n def __del__(self):\n self.mdb.close()\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n\n\nclass Teachers:\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,474 |
d5a4e0bd6553d8cde0a77cf5356c50318ce83cfc
|
import torch.nn as nn
import numpy as np
import os
from io import open
import torch
# Data Processor
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
    def add_word(self, word):
        if word not in self.word2idx:
            self.idx2word.append(word)
            self.word2idx[word] = len(self.idx2word) - 1
        return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
print("Processing Train Data")
self.train = self.tokenize(os.path.join(path, 'train.txt'))
print("Processing Valid Data")
self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
print("Processing Test Data")
self.test = self.tokenize(os.path.join(path, 'test.txt'))
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r', encoding="utf8") as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r', encoding="utf8") as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids
# RNN Models
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,
tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu', 'RNN': 'tanh'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_weights(self):
for p in self.parameters():
if p.dim() > 1:
# Matrices
val_range = (3.0/np.max(p.shape))**0.5
p.data.uniform_(-val_range, val_range)
else:
# Vectors/Bias
p.data.zero_()
def get_default_init(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid)
def get_burnin_init(self, X_burnin):
batch_size = X_burnin.shape[1]
default_init = self.get_default_init(batch_size)
_, burnin_init = self.forward(X_burnin, default_init)
return burnin_init
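# Usage sketch (assumptions for illustration: a ./data directory holding
# train.txt/valid.txt/test.txt, and the toy hyperparameters below).
if __name__ == '__main__':
    corpus = Corpus('./data')
    ntokens = len(corpus.dictionary)
    model = RNNModel('LSTM', ntoken=ntokens, ninp=128, nhid=128, nlayers=2)
    seq_len, bsz = 35, 4
    # Slice a toy (seq_len, batch) block of token ids from the training stream.
    X = corpus.train[:seq_len * bsz].view(seq_len, bsz)
    hidden = model.get_default_init(bsz)  # zero-filled initial (h, c) for LSTM
    logits, hidden = model(X, hidden)     # logits: (seq_len, bsz, ntokens)
    print(logits.shape)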
|
[
"import torch.nn as nn\nimport numpy as np\nimport os\nfrom io import open\nimport torch\n\n# Data Processor\nclass Dictionary(object):\n def __init__(self):\n self.word2idx = {}\n self.idx2word = []\n\n def add_word(self, word):\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n token_id = self.word2idx[word]\n return self.word2idx[word]\n\n def __len__(self):\n return len(self.idx2word)\n\n\nclass Corpus(object):\n def __init__(self, path):\n self.dictionary = Dictionary()\n print(\"Processing Train Data\")\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n print(\"Processing Valid Data\")\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n print(\"Processing Test Data\")\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r', encoding=\"utf8\") as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r', encoding=\"utf8\") as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids\n\n# RNN Models\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu', 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError( \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\")\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n\n # Optionally tie weights as in:\n # \"Using the Output Embedding to Improve Language Models\" (Press & Wolf 2016)\n # https://arxiv.org/abs/1608.05859\n # and\n # \"Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling\" (Inan et al. 
2016)\n # https://arxiv.org/abs/1611.01462\n if tie_weights:\n if nhid != ninp:\n raise ValueError('When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n\n self.init_weights()\n\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n # Matrices\n val_range = (3.0/np.max(p.shape))**0.5\n p.data.uniform_(-val_range, val_range)\n else:\n # Vectors/Bias\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return (weight.new_zeros(self.nlayers, bsz, self.nhid),\n weight.new_zeros(self.nlayers, bsz, self.nhid))\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n def get_burnin_init(self, X_burnin):\n batch_size = X_burnin.shape[1]\n default_init = self.get_default_init(batch_size)\n _, burnin_init = self.forward(X_burnin, default_init)\n return burnin_init\n\n\n\n\n",
"import torch.nn as nn\nimport numpy as np\nimport os\nfrom io import open\nimport torch\n\n\nclass Dictionary(object):\n\n def __init__(self):\n self.word2idx = {}\n self.idx2word = []\n\n def add_word(self, word):\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n token_id = self.word2idx[word]\n return self.word2idx[word]\n\n def __len__(self):\n return len(self.idx2word)\n\n\nclass Corpus(object):\n\n def __init__(self, path):\n self.dictionary = Dictionary()\n print('Processing Train Data')\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n print('Processing Valid Data')\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n print('Processing Test Data')\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n with open(path, 'r', encoding='utf8') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n with open(path, 'r', encoding='utf8') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids\n\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=\n dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu',\n 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=\n nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n if tie_weights:\n if nhid != ninp:\n raise ValueError(\n 'When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n self.init_weights()\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n def get_burnin_init(self, X_burnin):\n batch_size = X_burnin.shape[1]\n default_init = self.get_default_init(batch_size)\n _, burnin_init = self.forward(X_burnin, default_init)\n return burnin_init\n",
"<import token>\n\n\nclass Dictionary(object):\n\n def __init__(self):\n self.word2idx = {}\n self.idx2word = []\n\n def add_word(self, word):\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n token_id = self.word2idx[word]\n return self.word2idx[word]\n\n def __len__(self):\n return len(self.idx2word)\n\n\nclass Corpus(object):\n\n def __init__(self, path):\n self.dictionary = Dictionary()\n print('Processing Train Data')\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n print('Processing Valid Data')\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n print('Processing Test Data')\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n with open(path, 'r', encoding='utf8') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n with open(path, 'r', encoding='utf8') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids\n\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=\n dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu',\n 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=\n nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n if tie_weights:\n if nhid != ninp:\n raise ValueError(\n 'When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n self.init_weights()\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n def get_burnin_init(self, X_burnin):\n batch_size = X_burnin.shape[1]\n default_init = self.get_default_init(batch_size)\n _, burnin_init = self.forward(X_burnin, default_init)\n return burnin_init\n",
"<import token>\n\n\nclass Dictionary(object):\n\n def __init__(self):\n self.word2idx = {}\n self.idx2word = []\n\n def add_word(self, word):\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n token_id = self.word2idx[word]\n return self.word2idx[word]\n <function token>\n\n\nclass Corpus(object):\n\n def __init__(self, path):\n self.dictionary = Dictionary()\n print('Processing Train Data')\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n print('Processing Valid Data')\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n print('Processing Test Data')\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n with open(path, 'r', encoding='utf8') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n with open(path, 'r', encoding='utf8') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids\n\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=\n dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu',\n 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=\n nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n if tie_weights:\n if nhid != ninp:\n raise ValueError(\n 'When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n self.init_weights()\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n def get_burnin_init(self, X_burnin):\n batch_size = X_burnin.shape[1]\n default_init = self.get_default_init(batch_size)\n _, burnin_init = self.forward(X_burnin, default_init)\n return burnin_init\n",
"<import token>\n\n\nclass Dictionary(object):\n\n def __init__(self):\n self.word2idx = {}\n self.idx2word = []\n <function token>\n <function token>\n\n\nclass Corpus(object):\n\n def __init__(self, path):\n self.dictionary = Dictionary()\n print('Processing Train Data')\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n print('Processing Valid Data')\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n print('Processing Test Data')\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n with open(path, 'r', encoding='utf8') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n with open(path, 'r', encoding='utf8') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids\n\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=\n dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu',\n 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=\n nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n if tie_weights:\n if nhid != ninp:\n raise ValueError(\n 'When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n self.init_weights()\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n def get_burnin_init(self, X_burnin):\n batch_size = X_burnin.shape[1]\n default_init = self.get_default_init(batch_size)\n _, burnin_init = self.forward(X_burnin, default_init)\n return burnin_init\n",
"<import token>\n\n\nclass Dictionary(object):\n <function token>\n <function token>\n <function token>\n\n\nclass Corpus(object):\n\n def __init__(self, path):\n self.dictionary = Dictionary()\n print('Processing Train Data')\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n print('Processing Valid Data')\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n print('Processing Test Data')\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n with open(path, 'r', encoding='utf8') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n with open(path, 'r', encoding='utf8') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids\n\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=\n dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu',\n 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=\n nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n if tie_weights:\n if nhid != ninp:\n raise ValueError(\n 'When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n self.init_weights()\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n def get_burnin_init(self, X_burnin):\n batch_size = X_burnin.shape[1]\n default_init = self.get_default_init(batch_size)\n _, burnin_init = self.forward(X_burnin, default_init)\n return burnin_init\n",
"<import token>\n<class token>\n\n\nclass Corpus(object):\n\n def __init__(self, path):\n self.dictionary = Dictionary()\n print('Processing Train Data')\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n print('Processing Valid Data')\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n print('Processing Test Data')\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n with open(path, 'r', encoding='utf8') as f:\n tokens = 0\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n with open(path, 'r', encoding='utf8') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids\n\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=\n dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu',\n 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=\n nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n if tie_weights:\n if nhid != ninp:\n raise ValueError(\n 'When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n self.init_weights()\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n def get_burnin_init(self, X_burnin):\n batch_size = X_burnin.shape[1]\n default_init = self.get_default_init(batch_size)\n _, burnin_init = self.forward(X_burnin, default_init)\n return burnin_init\n",
"<import token>\n<class token>\n\n\nclass Corpus(object):\n\n def __init__(self, path):\n self.dictionary = Dictionary()\n print('Processing Train Data')\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n print('Processing Valid Data')\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n print('Processing Test Data')\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n <function token>\n\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=\n dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu',\n 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=\n nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n if tie_weights:\n if nhid != ninp:\n raise ValueError(\n 'When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n self.init_weights()\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n def get_burnin_init(self, X_burnin):\n batch_size = X_burnin.shape[1]\n default_init = self.get_default_init(batch_size)\n _, burnin_init = self.forward(X_burnin, default_init)\n return burnin_init\n",
"<import token>\n<class token>\n\n\nclass Corpus(object):\n <function token>\n <function token>\n\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=\n dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu',\n 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=\n nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n if tie_weights:\n if nhid != ninp:\n raise ValueError(\n 'When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n self.init_weights()\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n def get_burnin_init(self, X_burnin):\n batch_size = X_burnin.shape[1]\n default_init = self.get_default_init(batch_size)\n _, burnin_init = self.forward(X_burnin, default_init)\n return burnin_init\n",
"<import token>\n<class token>\n<class token>\n\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=\n dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu',\n 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=\n nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n if tie_weights:\n if nhid != ninp:\n raise ValueError(\n 'When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n self.init_weights()\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n def get_burnin_init(self, X_burnin):\n batch_size = X_burnin.shape[1]\n default_init = self.get_default_init(batch_size)\n _, burnin_init = self.forward(X_burnin, default_init)\n return burnin_init\n",
"<import token>\n<class token>\n<class token>\n\n\nclass RNNModel(nn.Module):\n <docstring token>\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=\n dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu',\n 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=\n nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n if tie_weights:\n if nhid != ninp:\n raise ValueError(\n 'When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n self.init_weights()\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n def get_burnin_init(self, X_burnin):\n batch_size = X_burnin.shape[1]\n default_init = self.get_default_init(batch_size)\n _, burnin_init = self.forward(X_burnin, default_init)\n return burnin_init\n",
"<import token>\n<class token>\n<class token>\n\n\nclass RNNModel(nn.Module):\n <docstring token>\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.2,\n tie_weights=False):\n super(RNNModel, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=\n dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu',\n 'RNN': 'tanh'}[rnn_type]\n except KeyError:\n raise ValueError(\n \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN', 'RNN_TANH' or 'RNN_RELU']\"\"\"\n )\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=\n nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n if tie_weights:\n if nhid != ninp:\n raise ValueError(\n 'When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n self.init_weights()\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n <function token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass RNNModel(nn.Module):\n <docstring token>\n <function token>\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n val_range = (3.0 / np.max(p.shape)) ** 0.5\n p.data.uniform_(-val_range, val_range)\n else:\n p.data.zero_()\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n <function token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass RNNModel(nn.Module):\n <docstring token>\n <function token>\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n <function token>\n\n def get_default_init(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return weight.new_zeros(self.nlayers, bsz, self.nhid\n ), weight.new_zeros(self.nlayers, bsz, self.nhid)\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n <function token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass RNNModel(nn.Module):\n <docstring token>\n <function token>\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output.view(output.size(0) * output.size(1),\n output.size(2)))\n return decoded.view(output.size(0), output.size(1), decoded.size(1)\n ), hidden\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass RNNModel(nn.Module):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n"
] | false |
98,475 |
24c57ca5fd23faa79897de7c25c7e3db7fa2ebb3
|
"""
Provides base classes and utils for implementing type-specific logical
view of Columns.
"""
import numpy as np
import pandas as pd
from numba import cuda, njit
from .buffer import Buffer
from . import utils, cudautils
from .column import Column
class TypedColumnBase(Column):
"""Base class for all typed column
e.g. NumericalColumn, CategoricalColumn
This class provides common operations to implement logical view and
type-based operations for the column.
Notes
-----
    Not designed to be instantiated directly. Instantiate subclasses instead.
"""
def __init__(self, **kwargs):
dtype = kwargs.pop('dtype')
super(TypedColumnBase, self).__init__(**kwargs)
# Logical dtype
self._dtype = dtype
@property
def dtype(self):
return self._dtype
def is_type_equivalent(self, other):
"""Is the logical type of the column equal to the other column.
"""
mine = self._replace_defaults()
theirs = other._replace_defaults()
def remove_base(dct):
            # removes base attributes in the physical layer.
basekeys = Column._replace_defaults(self).keys()
for k in basekeys:
del dct[k]
remove_base(mine)
remove_base(theirs)
return type(self) == type(other) and mine == theirs
def _replace_defaults(self):
params = super(TypedColumnBase, self)._replace_defaults()
params.update(dict(dtype=self._dtype))
return params
def argsort(self, ascending):
_, inds = self.sort_by_values(ascending=ascending)
return inds
def sort_by_values(self, ascending):
raise NotImplementedError
def column_empty_like(column, dtype, masked):
"""Allocate a new column like the given *column*
"""
data = cuda.device_array(shape=len(column), dtype=dtype)
params = dict(data=Buffer(data))
if masked:
mask = utils.make_mask(data.size)
params.update(dict(mask=Buffer(mask), null_count=data.size))
return Column(**params)
def column_empty_like_same_mask(column, dtype):
"""Create a new empty Column with the same length and the same mask.
Parameters
----------
dtype : np.dtype like
The dtype of the data buffer.
"""
data = cuda.device_array(shape=len(column), dtype=dtype)
params = dict(data=Buffer(data))
if column.has_null_mask:
params.update(mask=column.nullmask)
return Column(**params)
def column_select_by_boolmask(column, boolmask):
"""Select by a boolean mask to a column.
Returns (selected_column, selected_positions)
"""
from .numerical import NumericalColumn
assert not column.has_null_mask
boolbits = cudautils.compact_mask_bytes(boolmask.to_gpu_array())
indices = cudautils.arange(len(boolmask))
_, selinds = cudautils.copy_to_dense(indices, mask=boolbits)
_, selvals = cudautils.copy_to_dense(column.data.to_gpu_array(),
mask=boolbits)
assert not column.has_null_mask # the nullmask needs to be recomputed
selected_values = column.replace(data=Buffer(selvals))
selected_index = Buffer(selinds)
return selected_values, NumericalColumn(data=selected_index,
dtype=selected_index.dtype)
def as_column(arbitrary):
"""Create a Column from an arbitrary object
    Currently supported inputs are:
* ``Column``
* ``Buffer``
* numba device array
* numpy array
* pandas.Categorical
Returns
-------
result : subclass of TypedColumnBase
- CategoricalColumn for pandas.Categorical input.
- NumericalColumn for all other inputs.
"""
from . import numerical, categorical
if isinstance(arbitrary, Column):
if not isinstance(arbitrary, TypedColumnBase):
# interpret as numeric
return arbitrary.view(numerical.NumericalColumn,
dtype=arbitrary.dtype)
else:
return arbitrary
elif isinstance(arbitrary, pd.Categorical):
return categorical.pandas_categorical_as_column(arbitrary)
elif isinstance(arbitrary, Buffer):
return numerical.NumericalColumn(data=arbitrary, dtype=arbitrary.dtype)
elif cuda.devicearray.is_cuda_ndarray(arbitrary):
return as_column(Buffer(arbitrary))
elif isinstance(arbitrary, np.ndarray):
return as_column(Buffer(arbitrary))
else:
return as_column(np.asarray(arbitrary))
def column_applymap(udf, column, out_dtype):
"""Apply a elemenwise function to transform the values in the Column.
Parameters
----------
udf : function
Wrapped by numba jit for call on the GPU as a device function.
column : Column
The source column.
out_dtype : numpy.dtype
The dtype for use in the output.
Returns
-------
result : Buffer
"""
core = njit(udf)
results = cuda.device_array(shape=len(column), dtype=out_dtype)
values = column.data.to_gpu_array()
if column.mask:
# For masked columns
@cuda.jit
def kernel_masked(values, masks, results):
i = cuda.grid(1)
# in range?
if i < values.size:
# valid?
if utils.mask_get(masks, i):
# call udf
results[i] = core(values[i])
masks = column.mask.to_gpu_array()
kernel_masked.forall(len(column))(values, masks, results)
else:
# For non-masked columns
@cuda.jit
def kernel_non_masked(values, results):
i = cuda.grid(1)
# in range?
if i < values.size:
# call udf
results[i] = core(values[i])
kernel_non_masked.forall(len(column))(values, results)
# Output
return Buffer(results)
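# Usage sketch under stated assumptions: a CUDA-capable GPU with numba
# installed, and this module imported through its package (the relative
# imports above require that). Illustrative addition, not original code.
def _demo_applymap():
    # Wrap a host array as a NumericalColumn (no null mask).
    col = as_column(np.arange(10, dtype=np.float64))
    # Run x -> 2*x + 1 elementwise on the GPU; the result is a Buffer.
    out = column_applymap(lambda x: 2.0 * x + 1.0, col, out_dtype=np.float64)
    # to_gpu_array() exposes the device array (as used above); numba's
    # copy_to_host() brings it back as a numpy array: [1., 3., ..., 19.]
    return out.to_gpu_array().copy_to_host()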
|
[
"\"\"\"\nProvides base classes and utils for implementing type-specific logical\nview of Columns.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom numba import cuda, njit\n\nfrom .buffer import Buffer\nfrom . import utils, cudautils\nfrom .column import Column\n\n\nclass TypedColumnBase(Column):\n \"\"\"Base class for all typed column\n e.g. NumericalColumn, CategoricalColumn\n\n This class provides common operations to implement logical view and\n type-based operations for the column.\n\n Notes\n -----\n For designed to be instantiated directly. Instantiate subclasses instead.\n \"\"\"\n def __init__(self, **kwargs):\n dtype = kwargs.pop('dtype')\n super(TypedColumnBase, self).__init__(**kwargs)\n # Logical dtype\n self._dtype = dtype\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n # removes base attributes in the phyiscal layer.\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n\n remove_base(mine)\n remove_base(theirs)\n\n return type(self) == type(other) and mine == theirs\n\n def _replace_defaults(self):\n params = super(TypedColumnBase, self)._replace_defaults()\n params.update(dict(dtype=self._dtype))\n return params\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n\n def sort_by_values(self, ascending):\n raise NotImplementedError\n\n\ndef column_empty_like(column, dtype, masked):\n \"\"\"Allocate a new column like the given *column*\n \"\"\"\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if masked:\n mask = utils.make_mask(data.size)\n params.update(dict(mask=Buffer(mask), null_count=data.size))\n return Column(**params)\n\n\ndef column_empty_like_same_mask(column, dtype):\n \"\"\"Create a new empty Column with the same length and the same mask.\n\n Parameters\n ----------\n dtype : np.dtype like\n The dtype of the data buffer.\n \"\"\"\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if column.has_null_mask:\n params.update(mask=column.nullmask)\n return Column(**params)\n\n\ndef column_select_by_boolmask(column, boolmask):\n \"\"\"Select by a boolean mask to a column.\n\n Returns (selected_column, selected_positions)\n \"\"\"\n from .numerical import NumericalColumn\n assert not column.has_null_mask\n boolbits = cudautils.compact_mask_bytes(boolmask.to_gpu_array())\n indices = cudautils.arange(len(boolmask))\n _, selinds = cudautils.copy_to_dense(indices, mask=boolbits)\n _, selvals = cudautils.copy_to_dense(column.data.to_gpu_array(),\n mask=boolbits)\n\n assert not column.has_null_mask # the nullmask needs to be recomputed\n\n selected_values = column.replace(data=Buffer(selvals))\n selected_index = Buffer(selinds)\n return selected_values, NumericalColumn(data=selected_index,\n dtype=selected_index.dtype)\n\n\ndef as_column(arbitrary):\n \"\"\"Create a Column from an arbitrary object\n\n Currently support inputs are:\n\n * ``Column``\n * ``Buffer``\n * numba device array\n * numpy array\n * pandas.Categorical\n\n Returns\n -------\n result : subclass of TypedColumnBase\n - CategoricalColumn for pandas.Categorical input.\n - NumericalColumn for all other inputs.\n \"\"\"\n from . 
import numerical, categorical\n\n if isinstance(arbitrary, Column):\n if not isinstance(arbitrary, TypedColumnBase):\n # interpret as numeric\n return arbitrary.view(numerical.NumericalColumn,\n dtype=arbitrary.dtype)\n else:\n return arbitrary\n elif isinstance(arbitrary, pd.Categorical):\n return categorical.pandas_categorical_as_column(arbitrary)\n elif isinstance(arbitrary, Buffer):\n return numerical.NumericalColumn(data=arbitrary, dtype=arbitrary.dtype)\n elif cuda.devicearray.is_cuda_ndarray(arbitrary):\n return as_column(Buffer(arbitrary))\n elif isinstance(arbitrary, np.ndarray):\n return as_column(Buffer(arbitrary))\n else:\n return as_column(np.asarray(arbitrary))\n\n\ndef column_applymap(udf, column, out_dtype):\n \"\"\"Apply a elemenwise function to transform the values in the Column.\n\n Parameters\n ----------\n udf : function\n Wrapped by numba jit for call on the GPU as a device function.\n column : Column\n The source column.\n out_dtype : numpy.dtype\n The dtype for use in the output.\n\n Returns\n -------\n result : Buffer\n \"\"\"\n core = njit(udf)\n results = cuda.device_array(shape=len(column), dtype=out_dtype)\n values = column.data.to_gpu_array()\n if column.mask:\n # For masked columns\n @cuda.jit\n def kernel_masked(values, masks, results):\n i = cuda.grid(1)\n # in range?\n if i < values.size:\n # valid?\n if utils.mask_get(masks, i):\n # call udf\n results[i] = core(values[i])\n\n masks = column.mask.to_gpu_array()\n kernel_masked.forall(len(column))(values, masks, results)\n else:\n # For non-masked columns\n @cuda.jit\n def kernel_non_masked(values, results):\n i = cuda.grid(1)\n # in range?\n if i < values.size:\n # call udf\n results[i] = core(values[i])\n\n kernel_non_masked.forall(len(column))(values, results)\n # Output\n return Buffer(results)\n",
"<docstring token>\nimport numpy as np\nimport pandas as pd\nfrom numba import cuda, njit\nfrom .buffer import Buffer\nfrom . import utils, cudautils\nfrom .column import Column\n\n\nclass TypedColumnBase(Column):\n \"\"\"Base class for all typed column\n e.g. NumericalColumn, CategoricalColumn\n\n This class provides common operations to implement logical view and\n type-based operations for the column.\n\n Notes\n -----\n For designed to be instantiated directly. Instantiate subclasses instead.\n \"\"\"\n\n def __init__(self, **kwargs):\n dtype = kwargs.pop('dtype')\n super(TypedColumnBase, self).__init__(**kwargs)\n self._dtype = dtype\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n\n def _replace_defaults(self):\n params = super(TypedColumnBase, self)._replace_defaults()\n params.update(dict(dtype=self._dtype))\n return params\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n\n def sort_by_values(self, ascending):\n raise NotImplementedError\n\n\ndef column_empty_like(column, dtype, masked):\n \"\"\"Allocate a new column like the given *column*\n \"\"\"\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if masked:\n mask = utils.make_mask(data.size)\n params.update(dict(mask=Buffer(mask), null_count=data.size))\n return Column(**params)\n\n\ndef column_empty_like_same_mask(column, dtype):\n \"\"\"Create a new empty Column with the same length and the same mask.\n\n Parameters\n ----------\n dtype : np.dtype like\n The dtype of the data buffer.\n \"\"\"\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if column.has_null_mask:\n params.update(mask=column.nullmask)\n return Column(**params)\n\n\ndef column_select_by_boolmask(column, boolmask):\n \"\"\"Select by a boolean mask to a column.\n\n Returns (selected_column, selected_positions)\n \"\"\"\n from .numerical import NumericalColumn\n assert not column.has_null_mask\n boolbits = cudautils.compact_mask_bytes(boolmask.to_gpu_array())\n indices = cudautils.arange(len(boolmask))\n _, selinds = cudautils.copy_to_dense(indices, mask=boolbits)\n _, selvals = cudautils.copy_to_dense(column.data.to_gpu_array(), mask=\n boolbits)\n assert not column.has_null_mask\n selected_values = column.replace(data=Buffer(selvals))\n selected_index = Buffer(selinds)\n return selected_values, NumericalColumn(data=selected_index, dtype=\n selected_index.dtype)\n\n\ndef as_column(arbitrary):\n \"\"\"Create a Column from an arbitrary object\n\n Currently support inputs are:\n\n * ``Column``\n * ``Buffer``\n * numba device array\n * numpy array\n * pandas.Categorical\n\n Returns\n -------\n result : subclass of TypedColumnBase\n - CategoricalColumn for pandas.Categorical input.\n - NumericalColumn for all other inputs.\n \"\"\"\n from . 
import numerical, categorical\n if isinstance(arbitrary, Column):\n if not isinstance(arbitrary, TypedColumnBase):\n return arbitrary.view(numerical.NumericalColumn, dtype=\n arbitrary.dtype)\n else:\n return arbitrary\n elif isinstance(arbitrary, pd.Categorical):\n return categorical.pandas_categorical_as_column(arbitrary)\n elif isinstance(arbitrary, Buffer):\n return numerical.NumericalColumn(data=arbitrary, dtype=arbitrary.dtype)\n elif cuda.devicearray.is_cuda_ndarray(arbitrary):\n return as_column(Buffer(arbitrary))\n elif isinstance(arbitrary, np.ndarray):\n return as_column(Buffer(arbitrary))\n else:\n return as_column(np.asarray(arbitrary))\n\n\ndef column_applymap(udf, column, out_dtype):\n \"\"\"Apply a elemenwise function to transform the values in the Column.\n\n Parameters\n ----------\n udf : function\n Wrapped by numba jit for call on the GPU as a device function.\n column : Column\n The source column.\n out_dtype : numpy.dtype\n The dtype for use in the output.\n\n Returns\n -------\n result : Buffer\n \"\"\"\n core = njit(udf)\n results = cuda.device_array(shape=len(column), dtype=out_dtype)\n values = column.data.to_gpu_array()\n if column.mask:\n\n @cuda.jit\n def kernel_masked(values, masks, results):\n i = cuda.grid(1)\n if i < values.size:\n if utils.mask_get(masks, i):\n results[i] = core(values[i])\n masks = column.mask.to_gpu_array()\n kernel_masked.forall(len(column))(values, masks, results)\n else:\n\n @cuda.jit\n def kernel_non_masked(values, results):\n i = cuda.grid(1)\n if i < values.size:\n results[i] = core(values[i])\n kernel_non_masked.forall(len(column))(values, results)\n return Buffer(results)\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n \"\"\"Base class for all typed column\n e.g. NumericalColumn, CategoricalColumn\n\n This class provides common operations to implement logical view and\n type-based operations for the column.\n\n Notes\n -----\n For designed to be instantiated directly. Instantiate subclasses instead.\n \"\"\"\n\n def __init__(self, **kwargs):\n dtype = kwargs.pop('dtype')\n super(TypedColumnBase, self).__init__(**kwargs)\n self._dtype = dtype\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n\n def _replace_defaults(self):\n params = super(TypedColumnBase, self)._replace_defaults()\n params.update(dict(dtype=self._dtype))\n return params\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n\n def sort_by_values(self, ascending):\n raise NotImplementedError\n\n\ndef column_empty_like(column, dtype, masked):\n \"\"\"Allocate a new column like the given *column*\n \"\"\"\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if masked:\n mask = utils.make_mask(data.size)\n params.update(dict(mask=Buffer(mask), null_count=data.size))\n return Column(**params)\n\n\ndef column_empty_like_same_mask(column, dtype):\n \"\"\"Create a new empty Column with the same length and the same mask.\n\n Parameters\n ----------\n dtype : np.dtype like\n The dtype of the data buffer.\n \"\"\"\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if column.has_null_mask:\n params.update(mask=column.nullmask)\n return Column(**params)\n\n\ndef column_select_by_boolmask(column, boolmask):\n \"\"\"Select by a boolean mask to a column.\n\n Returns (selected_column, selected_positions)\n \"\"\"\n from .numerical import NumericalColumn\n assert not column.has_null_mask\n boolbits = cudautils.compact_mask_bytes(boolmask.to_gpu_array())\n indices = cudautils.arange(len(boolmask))\n _, selinds = cudautils.copy_to_dense(indices, mask=boolbits)\n _, selvals = cudautils.copy_to_dense(column.data.to_gpu_array(), mask=\n boolbits)\n assert not column.has_null_mask\n selected_values = column.replace(data=Buffer(selvals))\n selected_index = Buffer(selinds)\n return selected_values, NumericalColumn(data=selected_index, dtype=\n selected_index.dtype)\n\n\ndef as_column(arbitrary):\n \"\"\"Create a Column from an arbitrary object\n\n Currently support inputs are:\n\n * ``Column``\n * ``Buffer``\n * numba device array\n * numpy array\n * pandas.Categorical\n\n Returns\n -------\n result : subclass of TypedColumnBase\n - CategoricalColumn for pandas.Categorical input.\n - NumericalColumn for all other inputs.\n \"\"\"\n from . 
import numerical, categorical\n if isinstance(arbitrary, Column):\n if not isinstance(arbitrary, TypedColumnBase):\n return arbitrary.view(numerical.NumericalColumn, dtype=\n arbitrary.dtype)\n else:\n return arbitrary\n elif isinstance(arbitrary, pd.Categorical):\n return categorical.pandas_categorical_as_column(arbitrary)\n elif isinstance(arbitrary, Buffer):\n return numerical.NumericalColumn(data=arbitrary, dtype=arbitrary.dtype)\n elif cuda.devicearray.is_cuda_ndarray(arbitrary):\n return as_column(Buffer(arbitrary))\n elif isinstance(arbitrary, np.ndarray):\n return as_column(Buffer(arbitrary))\n else:\n return as_column(np.asarray(arbitrary))\n\n\ndef column_applymap(udf, column, out_dtype):\n \"\"\"Apply a elemenwise function to transform the values in the Column.\n\n Parameters\n ----------\n udf : function\n Wrapped by numba jit for call on the GPU as a device function.\n column : Column\n The source column.\n out_dtype : numpy.dtype\n The dtype for use in the output.\n\n Returns\n -------\n result : Buffer\n \"\"\"\n core = njit(udf)\n results = cuda.device_array(shape=len(column), dtype=out_dtype)\n values = column.data.to_gpu_array()\n if column.mask:\n\n @cuda.jit\n def kernel_masked(values, masks, results):\n i = cuda.grid(1)\n if i < values.size:\n if utils.mask_get(masks, i):\n results[i] = core(values[i])\n masks = column.mask.to_gpu_array()\n kernel_masked.forall(len(column))(values, masks, results)\n else:\n\n @cuda.jit\n def kernel_non_masked(values, results):\n i = cuda.grid(1)\n if i < values.size:\n results[i] = core(values[i])\n kernel_non_masked.forall(len(column))(values, results)\n return Buffer(results)\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n \"\"\"Base class for all typed column\n e.g. NumericalColumn, CategoricalColumn\n\n This class provides common operations to implement logical view and\n type-based operations for the column.\n\n Notes\n -----\n For designed to be instantiated directly. Instantiate subclasses instead.\n \"\"\"\n\n def __init__(self, **kwargs):\n dtype = kwargs.pop('dtype')\n super(TypedColumnBase, self).__init__(**kwargs)\n self._dtype = dtype\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n\n def _replace_defaults(self):\n params = super(TypedColumnBase, self)._replace_defaults()\n params.update(dict(dtype=self._dtype))\n return params\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n\n def sort_by_values(self, ascending):\n raise NotImplementedError\n\n\ndef column_empty_like(column, dtype, masked):\n \"\"\"Allocate a new column like the given *column*\n \"\"\"\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if masked:\n mask = utils.make_mask(data.size)\n params.update(dict(mask=Buffer(mask), null_count=data.size))\n return Column(**params)\n\n\ndef column_empty_like_same_mask(column, dtype):\n \"\"\"Create a new empty Column with the same length and the same mask.\n\n Parameters\n ----------\n dtype : np.dtype like\n The dtype of the data buffer.\n \"\"\"\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if column.has_null_mask:\n params.update(mask=column.nullmask)\n return Column(**params)\n\n\ndef column_select_by_boolmask(column, boolmask):\n \"\"\"Select by a boolean mask to a column.\n\n Returns (selected_column, selected_positions)\n \"\"\"\n from .numerical import NumericalColumn\n assert not column.has_null_mask\n boolbits = cudautils.compact_mask_bytes(boolmask.to_gpu_array())\n indices = cudautils.arange(len(boolmask))\n _, selinds = cudautils.copy_to_dense(indices, mask=boolbits)\n _, selvals = cudautils.copy_to_dense(column.data.to_gpu_array(), mask=\n boolbits)\n assert not column.has_null_mask\n selected_values = column.replace(data=Buffer(selvals))\n selected_index = Buffer(selinds)\n return selected_values, NumericalColumn(data=selected_index, dtype=\n selected_index.dtype)\n\n\ndef as_column(arbitrary):\n \"\"\"Create a Column from an arbitrary object\n\n Currently support inputs are:\n\n * ``Column``\n * ``Buffer``\n * numba device array\n * numpy array\n * pandas.Categorical\n\n Returns\n -------\n result : subclass of TypedColumnBase\n - CategoricalColumn for pandas.Categorical input.\n - NumericalColumn for all other inputs.\n \"\"\"\n from . 
import numerical, categorical\n if isinstance(arbitrary, Column):\n if not isinstance(arbitrary, TypedColumnBase):\n return arbitrary.view(numerical.NumericalColumn, dtype=\n arbitrary.dtype)\n else:\n return arbitrary\n elif isinstance(arbitrary, pd.Categorical):\n return categorical.pandas_categorical_as_column(arbitrary)\n elif isinstance(arbitrary, Buffer):\n return numerical.NumericalColumn(data=arbitrary, dtype=arbitrary.dtype)\n elif cuda.devicearray.is_cuda_ndarray(arbitrary):\n return as_column(Buffer(arbitrary))\n elif isinstance(arbitrary, np.ndarray):\n return as_column(Buffer(arbitrary))\n else:\n return as_column(np.asarray(arbitrary))\n\n\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n \"\"\"Base class for all typed column\n e.g. NumericalColumn, CategoricalColumn\n\n This class provides common operations to implement logical view and\n type-based operations for the column.\n\n Notes\n -----\n For designed to be instantiated directly. Instantiate subclasses instead.\n \"\"\"\n\n def __init__(self, **kwargs):\n dtype = kwargs.pop('dtype')\n super(TypedColumnBase, self).__init__(**kwargs)\n self._dtype = dtype\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n\n def _replace_defaults(self):\n params = super(TypedColumnBase, self)._replace_defaults()\n params.update(dict(dtype=self._dtype))\n return params\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n\n def sort_by_values(self, ascending):\n raise NotImplementedError\n\n\n<function token>\n\n\ndef column_empty_like_same_mask(column, dtype):\n \"\"\"Create a new empty Column with the same length and the same mask.\n\n Parameters\n ----------\n dtype : np.dtype like\n The dtype of the data buffer.\n \"\"\"\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if column.has_null_mask:\n params.update(mask=column.nullmask)\n return Column(**params)\n\n\ndef column_select_by_boolmask(column, boolmask):\n \"\"\"Select by a boolean mask to a column.\n\n Returns (selected_column, selected_positions)\n \"\"\"\n from .numerical import NumericalColumn\n assert not column.has_null_mask\n boolbits = cudautils.compact_mask_bytes(boolmask.to_gpu_array())\n indices = cudautils.arange(len(boolmask))\n _, selinds = cudautils.copy_to_dense(indices, mask=boolbits)\n _, selvals = cudautils.copy_to_dense(column.data.to_gpu_array(), mask=\n boolbits)\n assert not column.has_null_mask\n selected_values = column.replace(data=Buffer(selvals))\n selected_index = Buffer(selinds)\n return selected_values, NumericalColumn(data=selected_index, dtype=\n selected_index.dtype)\n\n\ndef as_column(arbitrary):\n \"\"\"Create a Column from an arbitrary object\n\n Currently support inputs are:\n\n * ``Column``\n * ``Buffer``\n * numba device array\n * numpy array\n * pandas.Categorical\n\n Returns\n -------\n result : subclass of TypedColumnBase\n - CategoricalColumn for pandas.Categorical input.\n - NumericalColumn for all other inputs.\n \"\"\"\n from . import numerical, categorical\n if isinstance(arbitrary, Column):\n if not isinstance(arbitrary, TypedColumnBase):\n return arbitrary.view(numerical.NumericalColumn, dtype=\n arbitrary.dtype)\n else:\n return arbitrary\n elif isinstance(arbitrary, pd.Categorical):\n return categorical.pandas_categorical_as_column(arbitrary)\n elif isinstance(arbitrary, Buffer):\n return numerical.NumericalColumn(data=arbitrary, dtype=arbitrary.dtype)\n elif cuda.devicearray.is_cuda_ndarray(arbitrary):\n return as_column(Buffer(arbitrary))\n elif isinstance(arbitrary, np.ndarray):\n return as_column(Buffer(arbitrary))\n else:\n return as_column(np.asarray(arbitrary))\n\n\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n \"\"\"Base class for all typed column\n e.g. NumericalColumn, CategoricalColumn\n\n This class provides common operations to implement logical view and\n type-based operations for the column.\n\n Notes\n -----\n For designed to be instantiated directly. Instantiate subclasses instead.\n \"\"\"\n\n def __init__(self, **kwargs):\n dtype = kwargs.pop('dtype')\n super(TypedColumnBase, self).__init__(**kwargs)\n self._dtype = dtype\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n\n def _replace_defaults(self):\n params = super(TypedColumnBase, self)._replace_defaults()\n params.update(dict(dtype=self._dtype))\n return params\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n\n def sort_by_values(self, ascending):\n raise NotImplementedError\n\n\n<function token>\n\n\ndef column_empty_like_same_mask(column, dtype):\n \"\"\"Create a new empty Column with the same length and the same mask.\n\n Parameters\n ----------\n dtype : np.dtype like\n The dtype of the data buffer.\n \"\"\"\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if column.has_null_mask:\n params.update(mask=column.nullmask)\n return Column(**params)\n\n\n<function token>\n\n\ndef as_column(arbitrary):\n \"\"\"Create a Column from an arbitrary object\n\n Currently support inputs are:\n\n * ``Column``\n * ``Buffer``\n * numba device array\n * numpy array\n * pandas.Categorical\n\n Returns\n -------\n result : subclass of TypedColumnBase\n - CategoricalColumn for pandas.Categorical input.\n - NumericalColumn for all other inputs.\n \"\"\"\n from . import numerical, categorical\n if isinstance(arbitrary, Column):\n if not isinstance(arbitrary, TypedColumnBase):\n return arbitrary.view(numerical.NumericalColumn, dtype=\n arbitrary.dtype)\n else:\n return arbitrary\n elif isinstance(arbitrary, pd.Categorical):\n return categorical.pandas_categorical_as_column(arbitrary)\n elif isinstance(arbitrary, Buffer):\n return numerical.NumericalColumn(data=arbitrary, dtype=arbitrary.dtype)\n elif cuda.devicearray.is_cuda_ndarray(arbitrary):\n return as_column(Buffer(arbitrary))\n elif isinstance(arbitrary, np.ndarray):\n return as_column(Buffer(arbitrary))\n else:\n return as_column(np.asarray(arbitrary))\n\n\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n \"\"\"Base class for all typed column\n e.g. NumericalColumn, CategoricalColumn\n\n This class provides common operations to implement logical view and\n type-based operations for the column.\n\n Notes\n -----\n For designed to be instantiated directly. Instantiate subclasses instead.\n \"\"\"\n\n def __init__(self, **kwargs):\n dtype = kwargs.pop('dtype')\n super(TypedColumnBase, self).__init__(**kwargs)\n self._dtype = dtype\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n\n def _replace_defaults(self):\n params = super(TypedColumnBase, self)._replace_defaults()\n params.update(dict(dtype=self._dtype))\n return params\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n\n def sort_by_values(self, ascending):\n raise NotImplementedError\n\n\n<function token>\n\n\ndef column_empty_like_same_mask(column, dtype):\n \"\"\"Create a new empty Column with the same length and the same mask.\n\n Parameters\n ----------\n dtype : np.dtype like\n The dtype of the data buffer.\n \"\"\"\n data = cuda.device_array(shape=len(column), dtype=dtype)\n params = dict(data=Buffer(data))\n if column.has_null_mask:\n params.update(mask=column.nullmask)\n return Column(**params)\n\n\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n \"\"\"Base class for all typed column\n e.g. NumericalColumn, CategoricalColumn\n\n This class provides common operations to implement logical view and\n type-based operations for the column.\n\n Notes\n -----\n For designed to be instantiated directly. Instantiate subclasses instead.\n \"\"\"\n\n def __init__(self, **kwargs):\n dtype = kwargs.pop('dtype')\n super(TypedColumnBase, self).__init__(**kwargs)\n self._dtype = dtype\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n\n def _replace_defaults(self):\n params = super(TypedColumnBase, self)._replace_defaults()\n params.update(dict(dtype=self._dtype))\n return params\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n\n def sort_by_values(self, ascending):\n raise NotImplementedError\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n <docstring token>\n\n def __init__(self, **kwargs):\n dtype = kwargs.pop('dtype')\n super(TypedColumnBase, self).__init__(**kwargs)\n self._dtype = dtype\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n\n def _replace_defaults(self):\n params = super(TypedColumnBase, self)._replace_defaults()\n params.update(dict(dtype=self._dtype))\n return params\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n\n def sort_by_values(self, ascending):\n raise NotImplementedError\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n <docstring token>\n\n def __init__(self, **kwargs):\n dtype = kwargs.pop('dtype')\n super(TypedColumnBase, self).__init__(**kwargs)\n self._dtype = dtype\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n <function token>\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n\n def sort_by_values(self, ascending):\n raise NotImplementedError\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n <docstring token>\n <function token>\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n <function token>\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n\n def sort_by_values(self, ascending):\n raise NotImplementedError\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n <docstring token>\n <function token>\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n <function token>\n\n def argsort(self, ascending):\n _, inds = self.sort_by_values(ascending=ascending)\n return inds\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n <docstring token>\n <function token>\n\n @property\n def dtype(self):\n return self._dtype\n\n def is_type_equivalent(self, other):\n \"\"\"Is the logical type of the column equal to the other column.\n \"\"\"\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n remove_base(mine)\n remove_base(theirs)\n return type(self) == type(other) and mine == theirs\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n <docstring token>\n <function token>\n\n @property\n def dtype(self):\n return self._dtype\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass TypedColumnBase(Column):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,476 |
d06cb9a9ffb9fc5af674e2cdd3a6b6ee838d18ef
|
text = "if you have ANY intutive idea"
print (text)
print(text.capitalize())
print (text.upper())
print(text.lower())
"""string.find(value, start, end) , it is almost similar to index but
find return -1 if value is not present and index return an excepjtion"""
print(text.find("have"))
print(text.find("v", 2,13)) #find v between the index value 2 and 13
print (text.index("ANY"))
print ("Is number: ", text.isalnum())
print ("Is alphabet:", text.isalpha()) #The isalpha() method returns True if all the characters are alphabet letters (a-z).
print ("Is Ascii:", text.isascii())
print ("Is decimal:", text.isdecimal())
print ("Is digit:", text.isdigit())
print ("Is numeric: ", text.isnumeric())
print ("Is lower: ", text.islower())
print ("Is space :", text.isspace())
print ("Is Title:", text.istitle())
print("Replace : ", text.replace("i", "It"))
print ("Split: ",text.split()) #Convert the string to list
print ("Title:", text.title())
print ("line Split:", text.splitlines())
|
[
"text = \"if you have ANY intutive idea\"\nprint (text)\n\nprint(text.capitalize())\nprint (text.upper())\nprint(text.lower())\n\n\n\"\"\"string.find(value, start, end) , it is almost similar to index but\nfind return -1 if value is not present and index return an excepjtion\"\"\"\nprint(text.find(\"have\")) \nprint(text.find(\"v\", 2,13)) #find v between the index value 2 and 13 \n\nprint (text.index(\"ANY\"))\nprint (\"Is number: \", text.isalnum())\nprint (\"Is alphabet:\", text.isalpha()) #The isalpha() method returns True if all the characters are alphabet letters (a-z).\nprint (\"Is Ascii:\", text.isascii())\nprint (\"Is decimal:\", text.isdecimal())\nprint (\"Is digit:\", text.isdigit())\nprint (\"Is numeric: \", text.isnumeric())\nprint (\"Is lower: \", text.islower())\nprint (\"Is space :\", text.isspace())\nprint (\"Is Title:\", text.istitle())\nprint(\"Replace : \", text.replace(\"i\", \"It\"))\nprint (\"Split: \",text.split()) #Convert the string to list \nprint (\"Title:\", text.title())\nprint (\"line Split:\", text.splitlines())",
"text = 'if you have ANY intutive idea'\nprint(text)\nprint(text.capitalize())\nprint(text.upper())\nprint(text.lower())\n<docstring token>\nprint(text.find('have'))\nprint(text.find('v', 2, 13))\nprint(text.index('ANY'))\nprint('Is number: ', text.isalnum())\nprint('Is alphabet:', text.isalpha())\nprint('Is Ascii:', text.isascii())\nprint('Is decimal:', text.isdecimal())\nprint('Is digit:', text.isdigit())\nprint('Is numeric: ', text.isnumeric())\nprint('Is lower: ', text.islower())\nprint('Is space :', text.isspace())\nprint('Is Title:', text.istitle())\nprint('Replace : ', text.replace('i', 'It'))\nprint('Split: ', text.split())\nprint('Title:', text.title())\nprint('line Split:', text.splitlines())\n",
"<assignment token>\nprint(text)\nprint(text.capitalize())\nprint(text.upper())\nprint(text.lower())\n<docstring token>\nprint(text.find('have'))\nprint(text.find('v', 2, 13))\nprint(text.index('ANY'))\nprint('Is number: ', text.isalnum())\nprint('Is alphabet:', text.isalpha())\nprint('Is Ascii:', text.isascii())\nprint('Is decimal:', text.isdecimal())\nprint('Is digit:', text.isdigit())\nprint('Is numeric: ', text.isnumeric())\nprint('Is lower: ', text.islower())\nprint('Is space :', text.isspace())\nprint('Is Title:', text.istitle())\nprint('Replace : ', text.replace('i', 'It'))\nprint('Split: ', text.split())\nprint('Title:', text.title())\nprint('line Split:', text.splitlines())\n",
"<assignment token>\n<code token>\n<docstring token>\n<code token>\n"
] | false |
98,477 |
17ed440509278fe6e0809ba831b89ebdd3f07adb
|
# if-statement practice
has = input("if you have ticket(yes/no):")
len = int(input("pls input length of your knife(cm):"))
if has == "yes":
if len >=20:
print("your knife is too long,it's {}cm".format(len))
else:
print("you can come in now.")
else:
print("you need buy ticket first")
|
[
"# if practice\n\nhas = input(\"if you have ticket(yes/no):\")\n\nlen = int(input(\"pls input length of your knife(cm):\"))\n\nif has == \"yes\":\n if len >=20:\n print(\"your knife is too long,it's {}cm\".format(len))\n else:\n print(\"you can come in now.\")\nelse:\n print(\"you need buy ticket first\")\n",
"has = input('if you have ticket(yes/no):')\nlen = int(input('pls input length of your knife(cm):'))\nif has == 'yes':\n if len >= 20:\n print(\"your knife is too long,it's {}cm\".format(len))\n else:\n print('you can come in now.')\nelse:\n print('you need buy ticket first')\n",
"<assignment token>\nif has == 'yes':\n if len >= 20:\n print(\"your knife is too long,it's {}cm\".format(len))\n else:\n print('you can come in now.')\nelse:\n print('you need buy ticket first')\n",
"<assignment token>\n<code token>\n"
] | false |
98,478 |
7ae0f22ebc7a137c6c9b77091145fa1783acc3a1
|
import csv
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("http://www.ftchinese.com/channel/2016highlight.html")
bsObj = BeautifulSoup(html, "html.parser")
headlines = bsObj.findAll("div",{"class":"items"})[0]
rows = headlines.findAll("h2")
csvFile = open("C:/Users/aorui/Documents/aorick/my word/datafiles/fthl.csv", "wt", newline="", encoding=None)
writer = csv.writer(csvFile)
try:
for row in rows:
writer.writerow(row)#.get_text())
        # Problem encountered: after .get_text(), the Chinese characters get split apart
#csvRow = []
#for cell in row.findAll(['h2']):
#csvRow.append(cell.get_text())
#writer.writerow(csvRow)
finally:
csvFile.close()
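
# A sketch (assumption, not from the original script) of the text-only variant:
# writerow(row.get_text()) treats the string as a sequence, so every character
# becomes its own CSV cell -- the "split apart" problem noted above. Wrapping
# the text in a one-element list keeps each headline in a single cell.
# The output path fthl_text.csv is hypothetical.
with open("fthl_text.csv", "wt", newline="", encoding="utf-8") as textFile:
    textWriter = csv.writer(textFile)
    for row in rows:
        textWriter.writerow([row.get_text()])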
|
[
"import csv\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen(\"http://www.ftchinese.com/channel/2016highlight.html\")\nbsObj = BeautifulSoup(html, \"html.parser\")\n\nheadlines = bsObj.findAll(\"div\",{\"class\":\"items\"})[0]\nrows = headlines.findAll(\"h2\")\n\ncsvFile = open(\"C:/Users/aorui/Documents/aorick/my word/datafiles/fthl.csv\", \"wt\", newline=\"\", encoding=None)\nwriter = csv.writer(csvFile)\n\ntry:\n for row in rows:\n writer.writerow(row)#.get_text())\n #遇到问题:.get_text()之后,汉字分离\n #csvRow = []\n #for cell in row.findAll(['h2']):\n #csvRow.append(cell.get_text())\n #writer.writerow(csvRow)\nfinally:\n csvFile.close()",
"import csv\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nhtml = urlopen('http://www.ftchinese.com/channel/2016highlight.html')\nbsObj = BeautifulSoup(html, 'html.parser')\nheadlines = bsObj.findAll('div', {'class': 'items'})[0]\nrows = headlines.findAll('h2')\ncsvFile = open('C:/Users/aorui/Documents/aorick/my word/datafiles/fthl.csv',\n 'wt', newline='', encoding=None)\nwriter = csv.writer(csvFile)\ntry:\n for row in rows:\n writer.writerow(row)\nfinally:\n csvFile.close()\n",
"<import token>\nhtml = urlopen('http://www.ftchinese.com/channel/2016highlight.html')\nbsObj = BeautifulSoup(html, 'html.parser')\nheadlines = bsObj.findAll('div', {'class': 'items'})[0]\nrows = headlines.findAll('h2')\ncsvFile = open('C:/Users/aorui/Documents/aorick/my word/datafiles/fthl.csv',\n 'wt', newline='', encoding=None)\nwriter = csv.writer(csvFile)\ntry:\n for row in rows:\n writer.writerow(row)\nfinally:\n csvFile.close()\n",
"<import token>\n<assignment token>\ntry:\n for row in rows:\n writer.writerow(row)\nfinally:\n csvFile.close()\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,479 |
1e41e917a65e64cc9cfa62b82856406f2495c184
|
# coding: utf-8
#all template match code adapted from the DeepMoon project
#https://github.com/silburt/DeepMoon/blob/master/utils/template_match_target.py
import numpy as np
from skimage.feature import match_template
import cv2
import os
import sys
from keras.models import Model, load_model
#from keras.layers import Dense, Input, BatchNormalization, Dropout
#from keras.layers.merge import Add
#from keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D, UpSampling2D
#from keras import backend as K
#from keras import optimizers
#from keras import metrics
import datetime
import glob
import random
from pandas import DataFrame
import pandas as pd
import matplotlib.pyplot as plt
import logging
class Match_Tiles(object):
    '''
    Match_Tiles is a test harness for quickly running a variety of matching modifications
    __init__:
        model_version is a string slug for the model, used in file names
        model_path is the path of the saved Keras model to load
        data_path is the glob pattern used to collect the data files
        targ_path is the glob pattern used to collect the target files
        csv_path is the glob pattern used to collect the human-annotated csv files
        VERBOSE is True by default; used for debugging and to decide whether images
        are printed to the screen
    '''
#Recommended import: from match_utils import Match_Tiles as mt
"""
Tuned Crater Detection Hyperparameters
--------------------------------------
minrad, maxrad : ints
radius range in match_template to search over.
longlat_thresh2, rad_thresh : floats
if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and
abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is
a duplicate of another crater candidate). In addition, when matching
CNN-detected rings to corresponding csvs (i.e. template_match_t2c),
the same criteria is used to determine a match.
template_thresh : float
0-1 range. If match_template probability > template_thresh, count as
detection.
target_thresh : float
0-1 range. target[target > target_thresh] = 1, otherwise 0
rw : int
1-32 range. Ring width, thickness of the rings used to match craters.
"""
longlat_thresh2_ = 1.8
rad_thresh_ = 1.0
template_thresh_ = 0.5
minrad_ = 6
maxrad_ = 140
target_thresh_ = 0.1
rw_ = 8
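
    # Worked example of the duplicate-removal criterion above (added for clarity):
    # candidate rings (x=100, y=100, r=10) and (x=103, y=104, r=11) give
    #   dL = ((100-103)**2 + (100-104)**2) / min(10, 11)**2 = 25/100 = 0.25 < 1.8
    #   dR = abs(10 - 11) / min(10, 11) = 0.1 < 1.0
    # so the lower-probability ring of the pair is discarded as a duplicate.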
def __init__(self, model_version, model_path, data_path, targ_path, csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4,
RANDOMIZED=True, VERBOSE=True, log_str=''):
#Defaults tuned by DeepMoon team, all have '_' after
self.longlat_thresh2_ = 1.8
self.rad_thresh_ = 1.0
self.template_thresh_ = 0.5
self.minrad_ = 6
self.maxrad_ = 140
self.target_thresh_ = 0.1
self.rw_ = 8
#string name of model version, used for saving
self.version = model_version
self.verbose = VERBOSE
#load files from paths
self.data_arr = grab_files_list(data_path, self.verbose)
self.targ_arr = grab_files_list(targ_path, self.verbose)
self.csv_hu_arr = grab_files_list(csv_path, self.verbose)
#load model
self.model = load_model(model_path)
#crater coord params
self.coords_arr = None
self.rw = rw #8 or 4
self.minr_px = minrpx #6 #2km = 8.6 px
self.maxr_px = maxrpx #140 #32 km = 138.2 px
self.targ_thresh = tt
#set up logging capabilities
#docs: https://docs.python.org/2.3/lib/node304.html
logger_name = 'test_log' + get_time()
self.logger = logging.getLogger(logger_name)
hdlr = logging.FileHandler('log/match_test_log_'+str(model_version)+'_'+get_time()+'.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
self.logger.addHandler(hdlr)
self.logger.setLevel(logging.INFO)
self.logger.info('New log file created for this test: '+ model_version)
#report the model_path, data_path, targ_path, & csv_path
self.logger.info('Model path: '+ model_path)
self.logger.info('Data path: '+ data_path)
self.logger.info('Target path: '+ targ_path)
self.logger.info('CSV path: '+ csv_path)
# def run_match_all_tiles(self):
# #load files from paths
# #loop over number of tiles
# #check they are valid
# return None
def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn): #move this out of self-land
data = []
target = []
loadIm(data_fn, targ_fn, data, target, step=512, newpx = 512, px = 7680)
data = 2*np.array(data)-1
target = np.array(target)
print(data.shape)
# Load model
#fn_model = 'models/model_v13d_epochs_500_20180510_0458.h5'# d_epochs_500_20180508_0343.h5' #selected arbitrarily, v13a didn't work?
#mod = load_model(fn_model)
mod = self.model
print('Model loaded at: ' + get_time())
self.logger.info('Model loaded at: ' + get_time())
# Run model on one tile's worth of data
outs = mod.predict(data)
print('Prediction finished at: ' + get_time())
self.logger.info('Prediction finished at: ' + get_time())
#Make the model output back into a tile
tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)
#Make the orig data & target back into a tile (this should match the input target)
tile_data = remake_tile(data, tile_size=7680, SHOW=False)
tile_targ = remake_tile(target, tile_size=7680, SHOW=False)
print('Tiles put back together at: ' + get_time())
self.logger.info('Tiles put back together at: ' + get_time())
#make copy of tile_pred *because the template match changes the np array directly
copy_tile_pred = np.copy(tile_pred)
#call crater_match
tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=self.minr_px, maxrad=self.maxr_px,
longlat_thresh2=self.longlat_thresh2_, rad_thresh=self.rad_thresh_,
template_thresh=self.template_thresh_,
target_thresh=self.targ_thresh, rw=self.rw)
print('Coordinates determined from prediction at: ' + get_time())
self.logger.info('Coordinates determined from prediction at: ' + get_time())
#make image showing comparison
#crater_list_to_image(crater_array, img_size=2048)
tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)
print('Crater list in new image finished at: ' + get_time())
self.logger.info('Crater list in new image finished at: ' + get_time())
#four_image(data_image, targ_image, pred_image, find_image, start_x=0, start_y=0, wid_ht=1024)
four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0, start_y=0, wid_ht=1024)
return tile_pred, tile_crater_coords
def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):
csv_px_xyr = make_csv_px_array(csv_px_fn)
csv_coords = np.copy(csv_px_xyr)
copy_tile_pred = np.copy(tile_pred)
stats, err, frac_dupes, templ_coords = self.template_match_t2c(copy_tile_pred,
csv_coords, templ_coords=list_coords,
minrad=self.minr_px, maxrad=self.maxr_px,
longlat_thresh2=self.longlat_thresh2_, rad_thresh=self.rad_thresh_,
template_thresh=self.template_thresh_, target_thresh=self.targ_thresh,
rw=self.rw, rmv_oor_csvs=0)
N_match, N_csv, N_detect, maxr = stats #maybe add frac_dupes to stats?
err_lo, err_la, err_r = err
#""""""
# Returns
# -------
# N_match : int
# Number of crater matches between your target and csv.
# N_csv : int
# Number of csv entries
# N_detect : int
# Total number of detected craters from target.
# maxr : int
# Radius of largest crater extracted from target.
# err_lo : float
# Mean longitude error between detected craters and csvs.
# err_la : float
# Mean latitude error between detected craters and csvs.
# err_r : float
# Mean radius error between detected craters and csvs.
# frac_dupes : float
#""""""
print('Number of matches: ' + str(N_match))
print('Number of csv entries: ' + str(N_csv))
print('Number of detected craters: ' + str(N_detect))
print('Max radius: ' + str(maxr))
print('err_lo: ' + str(err_lo))
print('err_la: ' + str(err_la))
print('err_r: ' + str(err_r))
print('frac_dupes: ' + str(frac_dupes))
return stats, err, frac_dupes, templ_coords, csv_px_xyr
def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,
longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,
template_thresh=template_thresh_,
target_thresh=target_thresh_, rw=rw_):
"""Extracts crater coordinates (in pixels) from a CNN-predicted target by
iteratively sliding rings through the image via match_template from
scikit-image.
Parameters
----------
target : array
CNN-predicted target.
minrad : integer
Minimum ring radius to search target over.
maxrad : integer
Maximum ring radius to search target over.
longlat_thresh2 : float
Minimum squared longitude/latitude difference between craters to be
considered distinct detections.
rad_thresh : float
Minimum fractional radius difference between craters to be considered
distinct detections.
template_thresh : float
Minimum match_template correlation coefficient to count as a detected
crater.
target_thresh : float
Value between 0-1. All pixels > target_thresh are set to 1, and
otherwise set to 0.
Returns
-------
coords : array
Pixel coordinates of successfully detected craters in predicted target.
"""
# thickness of rings for template match
#commented out because this is passed now
#rw = 8 #default 2 from DeepMoon project, we use 8 or 4
# threshold target
target[target >= target_thresh] = 1
target[target < target_thresh] = 0
radii = np.arange(minrad, maxrad + 1, 1, dtype=int)
coords = [] # coordinates extracted from template matching
corr = [] # correlation coefficient for coordinates set
for r in radii:
# template
n = 2 * (r + rw + 1)
template = np.zeros((n, n))
cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)
# template match - result is nxn array of probabilities
result = match_template(target, template, pad_input=True)
index_r = np.where(result > template_thresh)
coords_r = np.asarray(list(zip(*index_r)))
corr_r = np.asarray(result[index_r])
# store x,y,r
if len(coords_r) > 0:
for c in coords_r:
coords.append([c[1], c[0], r])
for l in corr_r:
corr.append(np.abs(l))
# remove duplicates from template matching at neighboring radii/locations
coords, corr = np.asarray(coords), np.asarray(corr)
i, N = 0, len(coords)
while i < N:
Long, Lat, Rad = coords.T
lo, la, r = coords[i]
minr = np.minimum(r, Rad)
dL = ((Long - lo)**2 + (Lat - la)**2) / minr**2
dR = abs(Rad - r) / minr
index = (dR < rad_thresh) & (dL < longlat_thresh2)
if len(np.where(index == True)[0]) > 1:
# replace current coord with max match probability coord in
# duplicate list
coords_i = coords[np.where(index == True)]
corr_i = corr[np.where(index == True)]
coords[i] = coords_i[corr_i == np.max(corr_i)][0]
index[i] = False
coords = coords[np.where(index == False)]
N, i = len(coords), i + 1
return coords
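
    # Usage sketch (added; mt is an assumed Match_Tiles instance): the method
    # thresholds the prediction in place, then for each radius r builds an
    # n x n image (n = 2*(r + rw + 1)) containing a single ring of width rw
    # and slides it over the target with skimage's match_template, e.g.
    #   coords = mt.template_match_t(np.copy(tile_pred), minrad=7, maxrad=140)
    #   # coords is an (N, 3) array of [x, y, r] in pixel units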
def template_match_t2c(self, target, csv_coords, templ_coords=None, minrad=minrad_, maxrad=maxrad_,
longlat_thresh2=longlat_thresh2_,
rad_thresh=rad_thresh_, template_thresh=template_thresh_,
target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):
"""Extracts crater coordinates (in pixels) from a CNN-predicted target and
compares the resulting detections to the corresponding human-counted crater
data.
Parameters
----------
target : array
CNN-predicted target.
csv_coords : array
Human-counted crater coordinates (in pixel units).
minrad : integer
Minimum ring radius to search target over.
maxrad : integer
Maximum ring radius to search target over.
longlat_thresh2 : float
Minimum squared longitude/latitude difference between craters to be
considered distinct detections.
rad_thresh : float
Minimum fractional radius difference between craters to be considered
distinct detections.
template_thresh : float
Minimum match_template correlation coefficient to count as a detected
crater.
target_thresh : float
Value between 0-1. All pixels > target_thresh are set to 1, and
otherwise set to 0.
rmv_oor_csvs : boolean, flag
If set to 1, remove craters from the csv that are outside your
detectable range.
Returns
-------
N_match : int
Number of crater matches between your target and csv.
N_csv : int
Number of csv entries
N_detect : int
Total number of detected craters from target.
maxr : int
Radius of largest crater extracted from target.
err_lo : float
Mean longitude error between detected craters and csvs.
err_la : float
Mean latitude error between detected craters and csvs.
err_r : float
Mean radius error between detected craters and csvs.
frac_dupes : float
Fraction of craters with multiple csv matches.
"""
# get coordinates from template matching IF they are not passed
        if(templ_coords is None):
            # fixed: template_match_t is a method, so it must be called via self
            templ_coords = self.template_match_t(target, minrad, maxrad, longlat_thresh2,
                                                 rad_thresh, template_thresh, target_thresh, rw)
else:
print('Found craters: ' + str(len(templ_coords)))
self.logger.info('Found craters: ' + str(len(templ_coords)))
# find max detected crater radius
maxr = 0
        if len(templ_coords) > 0:  # fixed: parenthesis was misplaced (len(templ_coords > 0))
maxr = np.max(templ_coords.T[2])
# compare template-matched results to ground truth csv input data
N_match = 0
frac_dupes = 0
err_lo, err_la, err_r = 0, 0, 0
N_csv, N_detect = len(csv_coords), len(templ_coords)
for lo, la, r in templ_coords:
Long, Lat, Rad = csv_coords.T
minr = np.minimum(r, Rad)
dL = ((Long - lo)**2 + (Lat - la)**2) / minr**2
dR = abs(Rad - r) / minr
index = (dR < rad_thresh) & (dL < longlat_thresh2)
index_True = np.where(index == True)[0]
N = len(index_True)
if N >= 1:
Lo, La, R = csv_coords[index_True[0]].T
meanr = (R + r) / 2.
err_lo += abs(Lo - lo) / meanr
err_la += abs(La - la) / meanr
err_r += abs(R - r) / meanr
if N > 1: # duplicate entries hurt recall
frac_dupes += (N-1) / float(len(templ_coords))
N_match += min(1, N)
# remove csv(s) so it can't be re-matched again
csv_coords = csv_coords[np.where(index == False)]
if len(csv_coords) == 0:
break
if rmv_oor_csvs == 1:
upper = 15
lower = minrad_
N_large_unmatched = len(np.where((csv_coords.T[2] > upper) |
(csv_coords.T[2] < lower))[0])
if N_large_unmatched < N_csv:
N_csv -= N_large_unmatched
if N_match >= 1:
err_lo = err_lo / N_match
err_la = err_la / N_match
err_r = err_r / N_match
stats = [N_match, N_csv, N_detect, maxr]
#self.logger.info('N_match')
err = [err_lo, err_la, err_r]
return stats, err, frac_dupes, templ_coords
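
    # Usage sketch (added): compare detections against the human-annotated csv, e.g.
    #   stats, err, frac_dupes, coords = mt.template_match_t2c(np.copy(tile_pred),
    #                                                          csv_px_xyr.copy())
    #   precision = stats[0] / stats[2]   # N_match / N_detect
    #   recall = stats[0] / stats[1]      # N_match / N_csv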
def get_subset_ha(csv_arr_px, minr_px=6, maxr_px=140):
    #https://stackoverflow.com/questions/2828059/sorting-arrays-in-numpy-by-column
    # Completed sketch (assumption): the original body sorted a copy and discarded
    # the result; return the annotations whose radius (column 2) is within the
    # detectable pixel range, sorted by radius.
    csv_sub = np.copy(csv_arr_px)
    csv_sub = csv_sub[(csv_sub[:, 2] >= minr_px) & (csv_sub[:, 2] <= maxr_px)]
    return csv_sub[csv_sub[:, 2].argsort()]
def get_time():
now = datetime.datetime.now().strftime("%Y%m%d_%H%M")
return now
def grab_files_list(path, verbose):
arr = glob.glob(path) #(glob.glob("Robbins_Dataset/synth_out/extracted_04-16km_on_blur2/*.png"))
arr.sort()
if (verbose):
print(len(arr))
print(arr)
return arr
def loadIm(fname, tname, data, target, step=512, newpx = 512, px = 2048):
im = plt.imread(fname)
#px = 2048 #im.size #TODO: FIX THIS IT WILL BREAK EVERYTHING
print('max: ' + str(im.max()) + ', min: ' + str(im.min()) + ', mean: ' + str(im.mean()))
tim = 1*(plt.imread(tname)>0) #makes values of target binary
counter = 0
print(im.shape)
print(tim.shape)
for y in range(0,px,step): #no need to sub 512 b/c px are mult of 512
for x in range(0,px,step):
data.append(im[x:x+newpx,y:y+newpx].reshape((newpx,newpx,1)))
target.append(tim[x:x+newpx,y:y+newpx].reshape((newpx,newpx,1)))
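
# Note (added): with px=7680 and step=newpx=512 the double loop above produces
# (7680/512)**2 = 225 non-overlapping subtiles per tile, each reshaped to
# (512, 512, 1) so the network sees a single-channel image.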
def remake_tile(images, tile_size=7680, stp=512, SAVE=False, SHOW=False, img_fn=None):
#figure out the grid size
num_images = len(images)
grid_size = int(np.sqrt(num_images))
#stp = 512
#make list of coordinates
coords = []
for x in range(grid_size):
for y in range(grid_size):
coords.append([x*stp, y*stp])
grid_tile = np.zeros((tile_size,tile_size))
#place each subtile in the larger tile
for i, im in enumerate(images):
grid_tile[coords[i][1]:coords[i][1]+stp,coords[i][0]:coords[i][0]+stp] = im[:,:,0]
if(SHOW):
plt.imshow(grid_tile)
plt.gcf().set_size_inches((12,12))
plt.show()
if(SAVE and img_fn is not None):
plt.imsave(img_fn+'.png',grid_tile)
return grid_tile
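
# Note (added): remake_tile assumes len(images) is a perfect square; with the
# defaults (tile_size=7680, stp=512) it expects 225 subtiles and places them
# column-major, matching the order in which loadIm extracted them.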
#MAKE PICTURE FROM CRATER LIST
def crater_list_to_image(crater_array, img_size=2048):
craters_found_img = np.zeros((img_size,img_size))
for i in range(len(crater_array)):
x_ctr = crater_array[i][0]; y_ctr = crater_array[i][1]; r=crater_array[i][2]
brightness = 255; thick = 4
#cv2.circle(img, center, radius, color, thickness=1, lineType=8, shift=0)
cv2.circle(craters_found_img,(x_ctr,y_ctr), r, brightness, thick)
#print(x_ctr)
plt.gcf().set_size_inches((12,12))
plt.imshow(craters_found_img)
plt.show()
return craters_found_img
def four_image(data_image, targ_image, pred_image, find_image, start_x=0,
start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):
#Show Subset of Tile
sx=start_x; sy=start_y; swh=wid_ht
plt.subplot(2,2,1)
plt.title('Data')
plt.imshow(data_image[sx:sx+swh, sy:sy+swh])
plt.subplot(2,2,2)
plt.title('Target')
plt.imshow(targ_image[sx:sx+swh, sy:sy+swh])
plt.subplot(2,2,3)
plt.title('NN Prediction')
plt.colorbar()
plt.imshow(pred_image[sx:sx+swh, sy:sy+swh])
plt.subplot(2,2,4)
plt.title('Crater Finder Output')
#plt.colorbar()
plt.imshow(find_image[sx:sx+swh, sy:sy+swh])
plt.gcf().set_size_inches((12,12))
    if(SAVE and img_fn is not None):
        plt.savefig(img_fn+'.png') # fixed: grid_tile does not exist here; save the current figure
if(SHOW):
plt.show()
#Make the csv px array, pull columns 3-5, reorder
def make_csv_px_array(csv_px_fn):
    tile_csv = pd.read_csv(csv_px_fn)
    # pandas removed DataFrame.as_matrix() in 1.0; slice the columns and convert instead
    tile_csv_px = tile_csv.iloc[:, 3:6].to_numpy() #numpy array
print(tile_csv_px)
tile_csv_px_xyr = np.copy(tile_csv_px) #making a copy isn't strictly necessary
#switch order of first two cols of new array from y-x-rad to x-y-rad
tile_csv_px_xyr[:,[0, 1, 2]] = tile_csv_px_xyr[:,[1, 0, 2]]
print(tile_csv_px_xyr)
return tile_csv_px_xyr
def make_comparison_plot(img_fn, coords, csv_px_xyr, rpx_min=7.9, rpx_max=138.2, save_fn=None, SAVE=True, SHOW=False):
#load grayscale image, cv2 loads as color by default
#img = np.zeros((7680,7680,3), np.uint8) #start with black, color image
img = cv2.imread(img_fn) #default loads as color image even though grayscale
#make a copy of the numpy arrays
crater_array = np.copy(coords)
from_csv = np.copy(csv_px_xyr)
#Add All the Annotation Craters
counter = 0 #counter will be the number of craters within the px range
for i in range(len(from_csv)):
x_ctr = from_csv[i][0]; y_ctr = from_csv[i][1]; r=from_csv[i][2]
brightness = 255; thick = 8
#cv2.circle(img, center, radius, color, thickness=1, lineType=8, shift=0)
        if(r<rpx_max and r>rpx_min):
            #annotation craters in blue; cv2.circle requires integer center/radius
            cv2.circle(img,(int(x_ctr),int(y_ctr)), int(r), (0,0,255), thick) #blue
            counter=counter+1
print(counter)
for i in range(len(crater_array)): #found craters
x_ctr = crater_array[i][0]; y_ctr = crater_array[i][1]; r=crater_array[i][2]
brightness = 255; thick = 8
#cv2.circle(img, center, radius, color, thickness=1, lineType=8, shift=0)
#found craters in green
        cv2.circle(img,(int(x_ctr),int(y_ctr)), int(r), (0,255,0), int(thick/2)) #green
#if (SAVE is True and save_fn is not None):
# print('Saving file at: ' + save_fn + '.png')
# cv2.imwrite(save_fn + '.png', img) #GIANT file >100 MB
if(SHOW or SAVE):
plt.imshow(img)
plt.gcf().set_size_inches((12,12))
plt.xticks([]), plt.yticks([])
if (SAVE):
plt.savefig(save_fn + '.png')
if (SHOW):
plt.show()
plt.imshow(img[0:2048,0:2048,:])
plt.gcf().set_size_inches((12,12))
plt.xticks([]), plt.yticks([])
#plt.savefig(save_fn + '_zoom' + '.png')
#plt.show()
if (SAVE):
plt.savefig(save_fn + '_zoom' + '.png')
if (SHOW):
plt.show()
return counter
def run_all_tiles(mt):
    # run_all_tiles previously duplicated run_some_tiles line for line;
    # delegate to it instead of maintaining two copies of the loop body
    run_some_tiles(mt, list(range(24)))
def run_some_tiles(mt, run_list):
mt.logger.info('Running SOME tiles: ' + str(run_list))
for i in run_list:
mt.logger.info('Starting processing for Tile '+"{:02}".format(i))
mt.logger.info('CSV, human annotated: '+ mt.csv_hu_arr[i])
mt.logger.info('Data: '+ mt.data_arr[i])
mt.logger.info('Target: '+ mt.targ_arr[i])
print('\n\n\n\n')
print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\n', sep=' \n ')
data_fn = mt.data_arr[i] #'Robbins_Dataset/out/thm_dir_N-30_090_-30_0_90_120_filled.png'
targ_fn = mt.targ_arr[i] #'Robbins_Dataset/out/thm_dir_N-30_090_-30_0_90_120_2_32_km_segrng_8_edge.png'
csv_px_fn = mt.csv_hu_arr[i] #'Robbins_Dataset/csv/LatLonDiam_RobbinsCraters_20121016_-30_0_90_120_px.csv'
tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)
stats, err, frac_dupes, templ_coords, csv_px_xyr = mt.run_compare_one_tile(csv_px_fn, tile_pred, coords)
sv_fn = 'plots/found/Tile_'+"{:02}".format(i)+'_'+mt.version+'_'+get_time()+'_match_comparison'
craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr, save_fn=sv_fn)
mt.logger.info('Saved comparison plot: '+ sv_fn)
print('Matches Ratio (matches/craters_in_range): ' + str(stats[0]/craters_in_range))
mt.logger.info('Tile ' + "{:02}".format(i) + ' Matches: ' + str(stats[0]))
mt.logger.info('Tile ' + "{:02}".format(i) + ' Craters_in_range: ' + str(craters_in_range))
mt.logger.info('Tile ' + "{:02}".format(i) + ' Matches ratio (matches/craters_in_range): ' +
str(stats[0]/craters_in_range))
print('Done at: ' + get_time())
mt.logger.info('Done with Tile '+"{:02}".format(i))
mt.logger.info(' ')
print('\n\n\n\n')
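

# End-to-end usage sketch (added; every path below is a hypothetical example,
# not taken from the original project):
if __name__ == '__main__':
    mt = Match_Tiles('v13d',
                     'models/example_model.h5',                 # model_path (assumed)
                     'Robbins_Dataset/out/*_filled.png',        # data_path glob (assumed)
                     'Robbins_Dataset/out/*_segrng_8_edge.png', # targ_path glob (assumed)
                     'Robbins_Dataset/csv/*_px.csv')            # csv_path glob (assumed)
    run_some_tiles(mt, [0, 1])  # or run_all_tiles(mt) for all 24 tiles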
|
[
"# coding: utf-8\n\n#all template match code adapted from the DeepMoon project\n#https://github.com/silburt/DeepMoon/blob/master/utils/template_match_target.py\n\nimport numpy as np\nfrom skimage.feature import match_template\nimport cv2\nimport os\nimport sys\nfrom keras.models import Model, load_model\n#from keras.layers import Dense, Input, BatchNormalization, Dropout\n#from keras.layers.merge import Add\n#from keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D, UpSampling2D\n#from keras import backend as K\n#from keras import optimizers\n#from keras import metrics\nimport datetime\nimport glob\nimport random\nfrom pandas import DataFrame\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport logging\n\nclass Match_Tiles(object):\n '''\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n '''\n \n #Recommended import: from match_utils import Match_Tiles as mt\n \n \n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. 
Ring width, thickness of the rings used to match craters.\n \"\"\"\n\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n \n def __init__(self, model_version, model_path, data_path, targ_path, csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4,\n RANDOMIZED=True, VERBOSE=True, log_str=''): \n\n #Defaults tuned by DeepMoon team, all have '_' after\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n\n #string name of model version, used for saving\n self.version = model_version\n \n self.verbose = VERBOSE\n #load files from paths\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n \n #load model\n self.model = load_model(model_path)\n \n #crater coord params\n self.coords_arr = None\n self.rw = rw #8 or 4\n self.minr_px = minrpx #6 #2km = 8.6 px\n self.maxr_px = maxrpx #140 #32 km = 138.2 px\n self.targ_thresh = tt\n \n #set up logging capabilities\n #docs: https://docs.python.org/2.3/lib/node304.html\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_'+str(model_version)+'_'+get_time()+'.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr) \n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: '+ model_version)\n #report the model_path, data_path, targ_path, & csv_path\n self.logger.info('Model path: '+ model_path)\n self.logger.info('Data path: '+ data_path)\n self.logger.info('Target path: '+ targ_path)\n self.logger.info('CSV path: '+ csv_path)\n \n# def run_match_all_tiles(self):\n# #load files from paths\n# #loop over number of tiles\n# #check they are valid \n# return None\n \n \n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn): #move this out of self-land \n data = []\n target = []\n\n loadIm(data_fn, targ_fn, data, target, step=512, newpx = 512, px = 7680)\n\n data = 2*np.array(data)-1\n target = np.array(target)\n\n print(data.shape)\n\n # Load model\n #fn_model = 'models/model_v13d_epochs_500_20180510_0458.h5'# d_epochs_500_20180508_0343.h5' #selected arbitrarily, v13a didn't work?\n #mod = load_model(fn_model)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n\n # Run model on one tile's worth of data\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n \n #Make the model output back into a tile\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n \n #Make the orig data & target back into a tile (this should match the input target)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n self.logger.info('Tiles put back together at: ' + get_time())\n \n #make copy of tile_pred *because the template match changes the np array directly\n copy_tile_pred = np.copy(tile_pred)\n \n #call crater_match\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=self.minr_px, maxrad=self.maxr_px,\n longlat_thresh2=self.longlat_thresh2_, rad_thresh=self.rad_thresh_,\n 
template_thresh=self.template_thresh_,\n target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' + get_time())\n \n #make image showing comparison \n #crater_list_to_image(crater_array, img_size=2048)\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n \n #four_image(data_image, targ_image, pred_image, find_image, start_x=0, start_y=0, wid_ht=1024)\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0, start_y=0, wid_ht=1024)\n \n return tile_pred, tile_crater_coords \n \n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None): \n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n \n copy_tile_pred = np.copy(tile_pred)\n #\\\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(copy_tile_pred, \n csv_coords, templ_coords=list_coords, \n minrad=self.minr_px, maxrad=self.maxr_px, \n longlat_thresh2=self.longlat_thresh2_, rad_thresh=self.rad_thresh_, \n template_thresh=self.template_thresh_, target_thresh=self.targ_thresh, \n rw=self.rw, rmv_oor_csvs=0)\n \n N_match, N_csv, N_detect, maxr = stats #maybe add frac_dupes to stats?\n err_lo, err_la, err_r = err\n \n #\"\"\"\"\"\"\n # Returns\n # -------\n # N_match : int\n # Number of crater matches between your target and csv.\n # N_csv : int\n # Number of csv entries\n # N_detect : int\n # Total number of detected craters from target.\n # maxr : int\n # Radius of largest crater extracted from target.\n # err_lo : float\n # Mean longitude error between detected craters and csvs.\n # err_la : float\n # Mean latitude error between detected craters and csvs.\n # err_r : float\n # Mean radius error between detected craters and csvs.\n # frac_dupes : float\n #\"\"\"\"\"\"\n\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n \n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n \n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n\n # thickness of rings for template match\n #commented out because this is passed now\n #rw = 8 #default 2 from DeepMoon project, we use 8 or 4\n\n # threshold target\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = [] # coordinates extracted from template matching\n corr = [] # correlation coefficient for coordinates set\n for r in radii:\n # template\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n\n # template match - result is nxn array of probabilities\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n\n # store x,y,r\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n\n # remove duplicates from template matching at neighboring radii/locations\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n\n dL = ((Long - lo)**2 + (Lat - la)**2) / minr**2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n # replace current coord with max match probability coord in\n # duplicate list\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n\n return coords\n\n \n def template_match_t2c(self, target, csv_coords, templ_coords=None, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n # get coordinates from template matching IF they are not passed\n if(templ_coords is None):\n templ_coords = template_match_t(target, minrad, maxrad, longlat_thresh2,\n rad_thresh, template_thresh, target_thresh, rw)\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n\n # find max detected crater radius\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n\n # compare template-matched results to ground truth csv input data\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n\n dL = ((Long - lo)**2 + (Lat - la)**2) / minr**2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1: # duplicate entries hurt recall\n frac_dupes += (N-1) / float(len(templ_coords))\n N_match += min(1, N)\n # remove csv(s) so it can't be re-matched again\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) |\n (csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n\n stats = [N_match, N_csv, N_detect, maxr]\n #self.logger.info('N_match')\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\n\n \ndef get_subset_ha(csv_arr_px, minr_px=6, maxr_px=140):\n #https://stackoverflow.com/questions/2828059/sorting-arrays-in-numpy-by-column\n csv_sub = np.copy(csv_arr_px)\n np.sort(csv_sub, axis=0)\n \ndef get_time():\n now = datetime.datetime.now().strftime(\"%Y%m%d_%H%M\")\n return now\n\ndef grab_files_list(path, verbose):\n arr = glob.glob(path) #(glob.glob(\"Robbins_Dataset/synth_out/extracted_04-16km_on_blur2/*.png\"))\n arr.sort()\n if (verbose):\n print(len(arr))\n print(arr)\n return arr\n\ndef loadIm(fname, tname, data, target, step=512, newpx = 512, px = 2048): \n im = plt.imread(fname)\n #px = 2048 #im.size #TODO: FIX THIS IT WILL BREAK EVERYTHING\n print('max: ' + str(im.max()) + ', min: ' + str(im.min()) + ', mean: ' + str(im.mean()))\n tim = 1*(plt.imread(tname)>0) #makes values of target binary\n counter = 0\n print(im.shape)\n print(tim.shape)\n for y in range(0,px,step): #no need to sub 512 b/c px are mult of 512\n for x in range(0,px,step):\n 
data.append(im[x:x+newpx,y:y+newpx].reshape((newpx,newpx,1)))\n target.append(tim[x:x+newpx,y:y+newpx].reshape((newpx,newpx,1)))\n\ndef remake_tile(images, tile_size=7680, stp=512, SAVE=False, SHOW=False, img_fn=None):\n #figure out the grid size\n num_images = len(images)\n grid_size = int(np.sqrt(num_images))\n #stp = 512\n \n #make list of coordinates \n coords = []\n for x in range(grid_size):\n for y in range(grid_size):\n coords.append([x*stp, y*stp])\n grid_tile = np.zeros((tile_size,tile_size))\n\n #place each subtile in the larger tile\n for i, im in enumerate(images):\n grid_tile[coords[i][1]:coords[i][1]+stp,coords[i][0]:coords[i][0]+stp] = im[:,:,0]\n \n if(SHOW):\n plt.imshow(grid_tile)\n plt.gcf().set_size_inches((12,12))\n plt.show()\n \n if(SAVE and img_fn is not None):\n plt.imsave(img_fn+'.png',grid_tile)\n \n return grid_tile \n\n#MAKE PICTURE FROM CRATER LIST\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size,img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]; y_ctr = crater_array[i][1]; r=crater_array[i][2]\n brightness = 255; thick = 4\n #cv2.circle(img, center, radius, color, thickness=1, lineType=8, shift=0)\n cv2.circle(craters_found_img,(x_ctr,y_ctr), r, brightness, thick) \n #print(x_ctr)\n \n plt.gcf().set_size_inches((12,12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0, \n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n #Show Subset of Tile\n sx=start_x; sy=start_y; swh=wid_ht\n \n plt.subplot(2,2,1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx+swh, sy:sy+swh])\n\n plt.subplot(2,2,2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx+swh, sy:sy+swh])\n\n plt.subplot(2,2,3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx+swh, sy:sy+swh])\n\n plt.subplot(2,2,4)\n plt.title('Crater Finder Output')\n #plt.colorbar()\n plt.imshow(find_image[sx:sx+swh, sy:sy+swh])\n\n plt.gcf().set_size_inches((12,12))\n \n if(SAVE and img_fn is not None):\n plt.imsave(img_fn+'.png',grid_tile)\n \n if(SHOW):\n plt.show()\n \n#Make the csv px array, pull columns 3-5, reorder\ndef make_csv_px_array(csv_px_fn):\n tile_csv = pd.read_csv(csv_px_fn)\n tile_csv_px = tile_csv.as_matrix(columns=tile_csv.columns[3:6]) #numpy array\n print(tile_csv_px)\n\n tile_csv_px_xyr = np.copy(tile_csv_px) #making a copy isn't strictly necessary\n\n #switch order of first two cols of new array from y-x-rad to x-y-rad\n tile_csv_px_xyr[:,[0, 1, 2]] = tile_csv_px_xyr[:,[1, 0, 2]]\n\n print(tile_csv_px_xyr)\n return tile_csv_px_xyr\n\ndef make_comparison_plot(img_fn, coords, csv_px_xyr, rpx_min=7.9, rpx_max=138.2, save_fn=None, SAVE=True, SHOW=False):\n #load grayscale image, cv2 loads as color by default\n #img = np.zeros((7680,7680,3), np.uint8) #start with black, color image\n img = cv2.imread(img_fn) #default loads as color image even though grayscale\n\n #make a copy of the numpy arrays\n crater_array = np.copy(coords)\n from_csv = np.copy(csv_px_xyr)\n\n #Add All the Annotation Craters\n counter = 0 #counter will be the number of craters within the px range\n for i in range(len(from_csv)):\n x_ctr = from_csv[i][0]; y_ctr = from_csv[i][1]; r=from_csv[i][2]\n brightness = 255; thick = 8\n #cv2.circle(img, center, radius, color, thickness=1, lineType=8, shift=0)\n if(r<rpx_max and r>rpx_min):\n #annotation craters in blue\n cv2.circle(img,(x_ctr,y_ctr), r, (0,0,255), thick) #blue\n 
counter=counter+1\n print(counter)\n\n for i in range(len(crater_array)): #found craters\n x_ctr = crater_array[i][0]; y_ctr = crater_array[i][1]; r=crater_array[i][2]\n brightness = 255; thick = 8\n #cv2.circle(img, center, radius, color, thickness=1, lineType=8, shift=0)\n #found craters in green\n cv2.circle(img,(x_ctr,y_ctr), r, (0,255,0), int(thick/2)) #green\n\n #if (SAVE is True and save_fn is not None):\n # print('Saving file at: ' + save_fn + '.png')\n # cv2.imwrite(save_fn + '.png', img) #GIANT file >100 MB\n \n if(SHOW or SAVE):\n plt.imshow(img)\n plt.gcf().set_size_inches((12,12))\n plt.xticks([]), plt.yticks([])\n if (SAVE):\n plt.savefig(save_fn + '.png')\n if (SHOW):\n plt.show()\n\n plt.imshow(img[0:2048,0:2048,:])\n plt.gcf().set_size_inches((12,12))\n plt.xticks([]), plt.yticks([])\n #plt.savefig(save_fn + '_zoom' + '.png')\n #plt.show()\n if (SAVE):\n plt.savefig(save_fn + '_zoom' + '.png')\n if (SHOW):\n plt.show()\n \n return counter\n\ndef run_all_tiles(mt):\n for i in range(24):\n mt.logger.info('Starting processing for Tile '+\"{:02}\".format(i))\n mt.logger.info('CSV, human annotated: '+ mt.csv_hu_arr[i])\n mt.logger.info('Data: '+ mt.data_arr[i])\n mt.logger.info('Target: '+ mt.targ_arr[i])\n\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=' \\n ')\n data_fn = mt.data_arr[i] #'Robbins_Dataset/out/thm_dir_N-30_090_-30_0_90_120_filled.png'\n targ_fn = mt.targ_arr[i] #'Robbins_Dataset/out/thm_dir_N-30_090_-30_0_90_120_2_32_km_segrng_8_edge.png'\n csv_px_fn = mt.csv_hu_arr[i] #'Robbins_Dataset/csv/LatLonDiam_RobbinsCraters_20121016_-30_0_90_120_px.csv'\n\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = mt.run_compare_one_tile(csv_px_fn, tile_pred, coords)\n\n sv_fn = 'plots/found/Tile_'+\"{:02}\".format(i)+'_'+mt.version+'_'+get_time()+'_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr, save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: '+ sv_fn)\n \n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0]/craters_in_range))\n mt.logger.info('Tile ' + \"{:02}\".format(i) + ' Matches: ' + str(stats[0]))\n mt.logger.info('Tile ' + \"{:02}\".format(i) + ' Craters_in_range: ' + str(craters_in_range))\n mt.logger.info('Tile ' + \"{:02}\".format(i) + ' Matches ratio (matches/craters_in_range): ' + \n str(stats[0]/craters_in_range))\n \n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile '+\"{:02}\".format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n \ndef run_some_tiles(mt, run_list):\n mt.logger.info('Running SOME tiles: ' + str(run_list))\n \n for i in run_list:\n mt.logger.info('Starting processing for Tile '+\"{:02}\".format(i))\n mt.logger.info('CSV, human annotated: '+ mt.csv_hu_arr[i])\n mt.logger.info('Data: '+ mt.data_arr[i])\n mt.logger.info('Target: '+ mt.targ_arr[i])\n\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=' \\n ')\n data_fn = mt.data_arr[i] #'Robbins_Dataset/out/thm_dir_N-30_090_-30_0_90_120_filled.png'\n targ_fn = mt.targ_arr[i] #'Robbins_Dataset/out/thm_dir_N-30_090_-30_0_90_120_2_32_km_segrng_8_edge.png'\n csv_px_fn = mt.csv_hu_arr[i] #'Robbins_Dataset/csv/LatLonDiam_RobbinsCraters_20121016_-30_0_90_120_px.csv'\n\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = mt.run_compare_one_tile(csv_px_fn, tile_pred, coords)\n\n sv_fn = 
'plots/found/Tile_'+\"{:02}\".format(i)+'_'+mt.version+'_'+get_time()+'_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr, save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: '+ sv_fn)\n \n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0]/craters_in_range))\n mt.logger.info('Tile ' + \"{:02}\".format(i) + ' Matches: ' + str(stats[0]))\n mt.logger.info('Tile ' + \"{:02}\".format(i) + ' Craters_in_range: ' + str(craters_in_range))\n mt.logger.info('Tile ' + \"{:02}\".format(i) + ' Matches ratio (matches/craters_in_range): ' + \n str(stats[0]/craters_in_range))\n \n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile '+\"{:02}\".format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n \n",
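The first steps entry above carries the fully commented version of template_match_t, the ring-based extractor at the heart of this harness. Below is a minimal, self-contained sketch of that core idea; the synthetic canvas, radii, and thresholds are illustrative assumptions, not the tuned values stored on Match_Tiles, and the class's duplicate-suppression loop is deliberately omitted.

import numpy as np
import cv2
from skimage.feature import match_template

def find_rings(target, radii, ring_width=4, template_thresh=0.5):
    """Return an (N, 3) array of [x, y, r] hits, one row per correlation peak."""
    hits = []
    for r in radii:
        n = 2 * (r + ring_width + 1)               # canvas just big enough for the ring
        templ = np.zeros((n, n), dtype=np.float32)
        cv2.circle(templ, (n // 2, n // 2), r, 1, ring_width)
        corr = match_template(target, templ, pad_input=True)  # same shape as target
        ys, xs = np.where(corr > template_thresh)  # rows are y, columns are x
        hits.extend([x, y, r] for x, y in zip(xs, ys))
    return np.asarray(hits)

# Smoke test: one synthetic ring of radius 20 centred at (64, 64).
canvas = np.zeros((128, 128), dtype=np.float32)
cv2.circle(canvas, (64, 64), 20, 1, 4)
cands = find_rings(canvas, radii=range(15, 26))
print(len(cands), cands[:, 2].mean())              # mean recovered radius sits near 20

In the real pipeline the hits returned here would still contain near-duplicate detections at neighboring radii; the longlat_thresh2/rad_thresh while-loop in template_match_t collapses those down to one circle per crater.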
"import numpy as np\nfrom skimage.feature import match_template\nimport cv2\nimport os\nimport sys\nfrom keras.models import Model, load_model\nimport datetime\nimport glob\nimport random\nfrom pandas import DataFrame\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport logging\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n 
self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\ndef get_subset_ha(csv_arr_px, minr_px=6, maxr_px=140):\n csv_sub = np.copy(csv_arr_px)\n np.sort(csv_sub, axis=0)\n\n\ndef get_time():\n now = datetime.datetime.now().strftime('%Y%m%d_%H%M')\n return now\n\n\ndef grab_files_list(path, verbose):\n arr = glob.glob(path)\n arr.sort()\n if verbose:\n print(len(arr))\n print(arr)\n return arr\n\n\ndef loadIm(fname, tname, data, target, step=512, newpx=512, px=2048):\n im = plt.imread(fname)\n print('max: ' + str(im.max()) + ', min: ' + str(im.min()) + ', mean: ' +\n str(im.mean()))\n tim = 1 * (plt.imread(tname) > 0)\n counter = 0\n print(im.shape)\n print(tim.shape)\n for y in range(0, px, step):\n for x in range(0, px, step):\n data.append(im[x:x + newpx, y:y + newpx].reshape((newpx, newpx, 1))\n )\n target.append(tim[x:x + newpx, y:y + newpx].reshape((newpx,\n newpx, 1)))\n\n\ndef remake_tile(images, tile_size=7680, stp=512, SAVE=False, SHOW=False,\n img_fn=None):\n num_images = len(images)\n grid_size = int(np.sqrt(num_images))\n coords = []\n for x in range(grid_size):\n for y in range(grid_size):\n coords.append([x * stp, y * stp])\n grid_tile = np.zeros((tile_size, tile_size))\n for i, im in enumerate(images):\n grid_tile[coords[i][1]:coords[i][1] + stp, coords[i][0]:coords[i][0\n ] + stp] 
= im[:, :, 0]\n if SHOW:\n plt.imshow(grid_tile)\n plt.gcf().set_size_inches((12, 12))\n plt.show()\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n return grid_tile\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0,\n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n sx = start_x\n sy = start_y\n swh = wid_ht\n plt.subplot(2, 2, 1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 4)\n plt.title('Crater Finder Output')\n plt.imshow(find_image[sx:sx + swh, sy:sy + swh])\n plt.gcf().set_size_inches((12, 12))\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n if SHOW:\n plt.show()\n\n\ndef make_csv_px_array(csv_px_fn):\n tile_csv = pd.read_csv(csv_px_fn)\n tile_csv_px = tile_csv.as_matrix(columns=tile_csv.columns[3:6])\n print(tile_csv_px)\n tile_csv_px_xyr = np.copy(tile_csv_px)\n tile_csv_px_xyr[:, [0, 1, 2]] = tile_csv_px_xyr[:, [1, 0, 2]]\n print(tile_csv_px_xyr)\n return tile_csv_px_xyr\n\n\ndef make_comparison_plot(img_fn, coords, csv_px_xyr, rpx_min=7.9, rpx_max=\n 138.2, save_fn=None, SAVE=True, SHOW=False):\n img = cv2.imread(img_fn)\n crater_array = np.copy(coords)\n from_csv = np.copy(csv_px_xyr)\n counter = 0\n for i in range(len(from_csv)):\n x_ctr = from_csv[i][0]\n y_ctr = from_csv[i][1]\n r = from_csv[i][2]\n brightness = 255\n thick = 8\n if r < rpx_max and r > rpx_min:\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 0, 255), thick)\n counter = counter + 1\n print(counter)\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 8\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 255, 0), int(thick / 2))\n if SHOW or SAVE:\n plt.imshow(img)\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '.png')\n if SHOW:\n plt.show()\n plt.imshow(img[0:2048, 0:2048, :])\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '_zoom' + '.png')\n if SHOW:\n plt.show()\n return counter\n\n\ndef run_all_tiles(mt):\n for i in range(24):\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + 
'_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n\n\ndef run_some_tiles(mt, run_list):\n mt.logger.info('Running SOME tiles: ' + str(run_list))\n for i in run_list:\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + '_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n",
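Stripping the comments, as this second entry does, makes a few latent bugs easier to spot. The sketch below shows the intended behavior as I read it from context; the fixes that only make sense in place are listed as comments.

import numpy as np

def safe_max_radius(coords):
    """Largest detected radius, or 0 when nothing was found.

    The class writes `if len(templ_coords > 0):`, which len()s an
    element-wise boolean mask rather than testing emptiness (and raises
    TypeError outright if a plain list is passed). The intended check:
    """
    coords = np.asarray(coords)
    return float(np.max(coords.T[2])) if len(coords) > 0 else 0.0

print(safe_max_radius([]))                           # 0.0
print(safe_max_radius([[10, 12, 7], [40, 9, 21]]))   # 21.0

# In-place fixes inferred from context:
# - template_match_t2c's fallback branch calls template_match_t unqualified;
#   inside Match_Tiles it is a method, so it must be self.template_match_t(...).
# - four_image's SAVE branch saves `grid_tile`, an undefined leftover from
#   remake_tile; plt.savefig(img_fn + '.png') matches the apparent intent.
# - DataFrame.as_matrix() was removed in pandas 1.0; in make_csv_px_array,
#   tile_csv.iloc[:, 3:6].to_numpy() is the drop-in replacement.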
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\ndef get_subset_ha(csv_arr_px, minr_px=6, maxr_px=140):\n csv_sub = np.copy(csv_arr_px)\n np.sort(csv_sub, axis=0)\n\n\ndef get_time():\n now = datetime.datetime.now().strftime('%Y%m%d_%H%M')\n return now\n\n\ndef grab_files_list(path, verbose):\n arr = glob.glob(path)\n arr.sort()\n if verbose:\n print(len(arr))\n print(arr)\n return arr\n\n\ndef loadIm(fname, tname, data, target, step=512, newpx=512, px=2048):\n im = plt.imread(fname)\n print('max: ' + str(im.max()) + ', min: ' + str(im.min()) + ', mean: ' +\n str(im.mean()))\n tim = 1 * (plt.imread(tname) > 0)\n counter = 0\n print(im.shape)\n print(tim.shape)\n for y in range(0, px, step):\n for x in range(0, px, step):\n data.append(im[x:x + newpx, y:y + newpx].reshape((newpx, newpx, 1))\n )\n target.append(tim[x:x + newpx, y:y + newpx].reshape((newpx,\n newpx, 1)))\n\n\ndef remake_tile(images, tile_size=7680, stp=512, SAVE=False, SHOW=False,\n img_fn=None):\n num_images = len(images)\n grid_size = int(np.sqrt(num_images))\n coords = []\n for x in range(grid_size):\n for y in range(grid_size):\n coords.append([x * stp, y * stp])\n grid_tile = np.zeros((tile_size, tile_size))\n for i, im in enumerate(images):\n grid_tile[coords[i][1]:coords[i][1] + stp, coords[i][0]:coords[i][0\n ] + stp] 
= im[:, :, 0]\n if SHOW:\n plt.imshow(grid_tile)\n plt.gcf().set_size_inches((12, 12))\n plt.show()\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n return grid_tile\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0,\n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n sx = start_x\n sy = start_y\n swh = wid_ht\n plt.subplot(2, 2, 1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 4)\n plt.title('Crater Finder Output')\n plt.imshow(find_image[sx:sx + swh, sy:sy + swh])\n plt.gcf().set_size_inches((12, 12))\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n if SHOW:\n plt.show()\n\n\ndef make_csv_px_array(csv_px_fn):\n tile_csv = pd.read_csv(csv_px_fn)\n tile_csv_px = tile_csv.as_matrix(columns=tile_csv.columns[3:6])\n print(tile_csv_px)\n tile_csv_px_xyr = np.copy(tile_csv_px)\n tile_csv_px_xyr[:, [0, 1, 2]] = tile_csv_px_xyr[:, [1, 0, 2]]\n print(tile_csv_px_xyr)\n return tile_csv_px_xyr\n\n\ndef make_comparison_plot(img_fn, coords, csv_px_xyr, rpx_min=7.9, rpx_max=\n 138.2, save_fn=None, SAVE=True, SHOW=False):\n img = cv2.imread(img_fn)\n crater_array = np.copy(coords)\n from_csv = np.copy(csv_px_xyr)\n counter = 0\n for i in range(len(from_csv)):\n x_ctr = from_csv[i][0]\n y_ctr = from_csv[i][1]\n r = from_csv[i][2]\n brightness = 255\n thick = 8\n if r < rpx_max and r > rpx_min:\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 0, 255), thick)\n counter = counter + 1\n print(counter)\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 8\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 255, 0), int(thick / 2))\n if SHOW or SAVE:\n plt.imshow(img)\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '.png')\n if SHOW:\n plt.show()\n plt.imshow(img[0:2048, 0:2048, :])\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '_zoom' + '.png')\n if SHOW:\n plt.show()\n return counter\n\n\ndef run_all_tiles(mt):\n for i in range(24):\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + 
'_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n\n\ndef run_some_tiles(mt, run_list):\n mt.logger.info('Running SOME tiles: ' + str(run_list))\n for i in run_list:\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + '_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n",
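run_compare_one_tile prints the raw counts returned by template_match_t2c but never the ratios, apart from the matches/craters_in_range figure logged by the tile runners. A small helper, my addition rather than part of the harness, folds stats into the usual detection metrics:

def summarize_match_stats(stats, frac_dupes=0.0):
    """Fold [N_match, N_csv, N_detect, maxr] into precision/recall/F1."""
    N_match, N_csv, N_detect, maxr = stats
    precision = N_match / N_detect if N_detect else 0.0  # matched fraction of detections
    recall = N_match / N_csv if N_csv else 0.0           # detected fraction of csv craters
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return {'precision': precision, 'recall': recall, 'f1': f1,
            'max_radius_px': maxr, 'frac_dupes': frac_dupes}

print(summarize_match_stats([80, 100, 120, 64]))
# {'precision': 0.666..., 'recall': 0.8, 'f1': 0.727..., 'max_radius_px': 64, 'frac_dupes': 0.0}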
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\ndef get_subset_ha(csv_arr_px, minr_px=6, maxr_px=140):\n csv_sub = np.copy(csv_arr_px)\n np.sort(csv_sub, axis=0)\n\n\ndef get_time():\n now = datetime.datetime.now().strftime('%Y%m%d_%H%M')\n return now\n\n\ndef grab_files_list(path, verbose):\n arr = glob.glob(path)\n arr.sort()\n if verbose:\n print(len(arr))\n print(arr)\n return arr\n\n\n<function token>\n\n\ndef remake_tile(images, tile_size=7680, stp=512, SAVE=False, SHOW=False,\n img_fn=None):\n num_images = len(images)\n grid_size = int(np.sqrt(num_images))\n coords = []\n for x in range(grid_size):\n for y in range(grid_size):\n coords.append([x * stp, y * stp])\n grid_tile = np.zeros((tile_size, tile_size))\n for i, im in enumerate(images):\n grid_tile[coords[i][1]:coords[i][1] + stp, coords[i][0]:coords[i][0\n ] + stp] = im[:, :, 0]\n if SHOW:\n plt.imshow(grid_tile)\n plt.gcf().set_size_inches((12, 12))\n plt.show()\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n return grid_tile\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n 
cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0,\n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n sx = start_x\n sy = start_y\n swh = wid_ht\n plt.subplot(2, 2, 1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 4)\n plt.title('Crater Finder Output')\n plt.imshow(find_image[sx:sx + swh, sy:sy + swh])\n plt.gcf().set_size_inches((12, 12))\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n if SHOW:\n plt.show()\n\n\ndef make_csv_px_array(csv_px_fn):\n tile_csv = pd.read_csv(csv_px_fn)\n tile_csv_px = tile_csv.as_matrix(columns=tile_csv.columns[3:6])\n print(tile_csv_px)\n tile_csv_px_xyr = np.copy(tile_csv_px)\n tile_csv_px_xyr[:, [0, 1, 2]] = tile_csv_px_xyr[:, [1, 0, 2]]\n print(tile_csv_px_xyr)\n return tile_csv_px_xyr\n\n\ndef make_comparison_plot(img_fn, coords, csv_px_xyr, rpx_min=7.9, rpx_max=\n 138.2, save_fn=None, SAVE=True, SHOW=False):\n img = cv2.imread(img_fn)\n crater_array = np.copy(coords)\n from_csv = np.copy(csv_px_xyr)\n counter = 0\n for i in range(len(from_csv)):\n x_ctr = from_csv[i][0]\n y_ctr = from_csv[i][1]\n r = from_csv[i][2]\n brightness = 255\n thick = 8\n if r < rpx_max and r > rpx_min:\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 0, 255), thick)\n counter = counter + 1\n print(counter)\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 8\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 255, 0), int(thick / 2))\n if SHOW or SAVE:\n plt.imshow(img)\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '.png')\n if SHOW:\n plt.show()\n plt.imshow(img[0:2048, 0:2048, :])\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '_zoom' + '.png')\n if SHOW:\n plt.show()\n return counter\n\n\ndef run_all_tiles(mt):\n for i in range(24):\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + '_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' 
Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n\n\ndef run_some_tiles(mt, run_list):\n mt.logger.info('Running SOME tiles: ' + str(run_list))\n for i in run_list:\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + '_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n",
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\ndef get_subset_ha(csv_arr_px, minr_px=6, maxr_px=140):\n csv_sub = np.copy(csv_arr_px)\n np.sort(csv_sub, axis=0)\n\n\n<function token>\n\n\ndef grab_files_list(path, verbose):\n arr = glob.glob(path)\n arr.sort()\n if verbose:\n print(len(arr))\n print(arr)\n return arr\n\n\n<function token>\n\n\ndef remake_tile(images, tile_size=7680, stp=512, SAVE=False, SHOW=False,\n img_fn=None):\n num_images = len(images)\n grid_size = int(np.sqrt(num_images))\n coords = []\n for x in range(grid_size):\n for y in range(grid_size):\n coords.append([x * stp, y * stp])\n grid_tile = np.zeros((tile_size, tile_size))\n for i, im in enumerate(images):\n grid_tile[coords[i][1]:coords[i][1] + stp, coords[i][0]:coords[i][0\n ] + stp] = im[:, :, 0]\n if SHOW:\n plt.imshow(grid_tile)\n plt.gcf().set_size_inches((12, 12))\n plt.show()\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n return grid_tile\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n 
plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0,\n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n sx = start_x\n sy = start_y\n swh = wid_ht\n plt.subplot(2, 2, 1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 4)\n plt.title('Crater Finder Output')\n plt.imshow(find_image[sx:sx + swh, sy:sy + swh])\n plt.gcf().set_size_inches((12, 12))\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n if SHOW:\n plt.show()\n\n\ndef make_csv_px_array(csv_px_fn):\n tile_csv = pd.read_csv(csv_px_fn)\n tile_csv_px = tile_csv.as_matrix(columns=tile_csv.columns[3:6])\n print(tile_csv_px)\n tile_csv_px_xyr = np.copy(tile_csv_px)\n tile_csv_px_xyr[:, [0, 1, 2]] = tile_csv_px_xyr[:, [1, 0, 2]]\n print(tile_csv_px_xyr)\n return tile_csv_px_xyr\n\n\ndef make_comparison_plot(img_fn, coords, csv_px_xyr, rpx_min=7.9, rpx_max=\n 138.2, save_fn=None, SAVE=True, SHOW=False):\n img = cv2.imread(img_fn)\n crater_array = np.copy(coords)\n from_csv = np.copy(csv_px_xyr)\n counter = 0\n for i in range(len(from_csv)):\n x_ctr = from_csv[i][0]\n y_ctr = from_csv[i][1]\n r = from_csv[i][2]\n brightness = 255\n thick = 8\n if r < rpx_max and r > rpx_min:\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 0, 255), thick)\n counter = counter + 1\n print(counter)\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 8\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 255, 0), int(thick / 2))\n if SHOW or SAVE:\n plt.imshow(img)\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '.png')\n if SHOW:\n plt.show()\n plt.imshow(img[0:2048, 0:2048, :])\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '_zoom' + '.png')\n if SHOW:\n plt.show()\n return counter\n\n\ndef run_all_tiles(mt):\n for i in range(24):\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + '_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n 
craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n\n\ndef run_some_tiles(mt, run_list):\n mt.logger.info('Running SOME tiles: ' + str(run_list))\n for i in run_list:\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + '_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n",
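For reference, the patch-to-mosaic step performed by `remake_tile` in the entry above (a 7680 px tile rebuilt from 15 x 15 patches of 512 px, filled column by column) reduces to the following sketch; `reassemble` is an illustrative name, each patch is assumed to have shape `(stp, stp, 1)` as in the dump, and the plotting/saving branches are dropped:

```python
# Sketch of remake_tile's grid reassembly; plotting and saving omitted.
import numpy as np

def reassemble(patches, tile_size=7680, stp=512):
    grid = int(np.sqrt(len(patches)))                  # e.g. 225 patches -> 15 x 15
    coords = [(x * stp, y * stp) for x in range(grid)  # x outer, y inner:
              for y in range(grid)]                    # fills the tile column by column
    tile = np.zeros((tile_size, tile_size))
    for (x0, y0), patch in zip(coords, patches):
        tile[y0:y0 + stp, x0:x0 + stp] = patch[:, :, 0]
    return tile
```

One caveat visible in the dump: the save branch of `four_image` writes `grid_tile`, a leftover from `remake_tile`, so calling it with `SAVE=True` and an `img_fn` would raise a `NameError`.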
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\ndef get_subset_ha(csv_arr_px, minr_px=6, maxr_px=140):\n csv_sub = np.copy(csv_arr_px)\n np.sort(csv_sub, axis=0)\n\n\n<function token>\n\n\ndef grab_files_list(path, verbose):\n arr = glob.glob(path)\n arr.sort()\n if verbose:\n print(len(arr))\n print(arr)\n return arr\n\n\n<function token>\n<function token>\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0,\n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n sx = start_x\n sy = start_y\n swh = wid_ht\n plt.subplot(2, 2, 1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx + swh, sy:sy + swh])\n 
plt.subplot(2, 2, 4)\n plt.title('Crater Finder Output')\n plt.imshow(find_image[sx:sx + swh, sy:sy + swh])\n plt.gcf().set_size_inches((12, 12))\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n if SHOW:\n plt.show()\n\n\ndef make_csv_px_array(csv_px_fn):\n tile_csv = pd.read_csv(csv_px_fn)\n tile_csv_px = tile_csv.as_matrix(columns=tile_csv.columns[3:6])\n print(tile_csv_px)\n tile_csv_px_xyr = np.copy(tile_csv_px)\n tile_csv_px_xyr[:, [0, 1, 2]] = tile_csv_px_xyr[:, [1, 0, 2]]\n print(tile_csv_px_xyr)\n return tile_csv_px_xyr\n\n\ndef make_comparison_plot(img_fn, coords, csv_px_xyr, rpx_min=7.9, rpx_max=\n 138.2, save_fn=None, SAVE=True, SHOW=False):\n img = cv2.imread(img_fn)\n crater_array = np.copy(coords)\n from_csv = np.copy(csv_px_xyr)\n counter = 0\n for i in range(len(from_csv)):\n x_ctr = from_csv[i][0]\n y_ctr = from_csv[i][1]\n r = from_csv[i][2]\n brightness = 255\n thick = 8\n if r < rpx_max and r > rpx_min:\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 0, 255), thick)\n counter = counter + 1\n print(counter)\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 8\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 255, 0), int(thick / 2))\n if SHOW or SAVE:\n plt.imshow(img)\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '.png')\n if SHOW:\n plt.show()\n plt.imshow(img[0:2048, 0:2048, :])\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '_zoom' + '.png')\n if SHOW:\n plt.show()\n return counter\n\n\ndef run_all_tiles(mt):\n for i in range(24):\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + '_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n\n\ndef run_some_tiles(mt, run_list):\n mt.logger.info('Running SOME tiles: ' + str(run_list))\n for i in run_list:\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = 
mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + '_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n",
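The class docstring repeated in each step pins down the duplicate/match criterion exactly; written out as a two-circle predicate it is simply:

```python
# The duplicate/match test from the Match_Tiles docstring above, for two
# circles (x, y, r); defaults follow the tuned values (1.8, 1.0).
def is_duplicate(c1, c2, longlat_thresh2=1.8, rad_thresh=1.0):
    (x1, y1, r1), (x2, y2, r2) = c1, c2
    minr = min(r1, r2)
    dL = ((x1 - x2) ** 2 + (y1 - y2) ** 2) / minr ** 2  # squared centre offset
    dR = abs(r1 - r2) / minr                            # fractional radius offset
    return dL < longlat_thresh2 and dR < rad_thresh
```

Normalizing both terms by the smaller radius makes the tolerance scale with crater size: small craters must agree to within a few pixels, while large ones are allowed proportionally more slack.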
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\ndef get_subset_ha(csv_arr_px, minr_px=6, maxr_px=140):\n csv_sub = np.copy(csv_arr_px)\n np.sort(csv_sub, axis=0)\n\n\n<function token>\n\n\ndef grab_files_list(path, verbose):\n arr = glob.glob(path)\n arr.sort()\n if verbose:\n print(len(arr))\n print(arr)\n return arr\n\n\n<function token>\n<function token>\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0,\n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n sx = start_x\n sy = start_y\n swh = wid_ht\n plt.subplot(2, 2, 1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx + swh, sy:sy + swh])\n 
plt.subplot(2, 2, 4)\n plt.title('Crater Finder Output')\n plt.imshow(find_image[sx:sx + swh, sy:sy + swh])\n plt.gcf().set_size_inches((12, 12))\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n if SHOW:\n plt.show()\n\n\ndef make_csv_px_array(csv_px_fn):\n tile_csv = pd.read_csv(csv_px_fn)\n tile_csv_px = tile_csv.as_matrix(columns=tile_csv.columns[3:6])\n print(tile_csv_px)\n tile_csv_px_xyr = np.copy(tile_csv_px)\n tile_csv_px_xyr[:, [0, 1, 2]] = tile_csv_px_xyr[:, [1, 0, 2]]\n print(tile_csv_px_xyr)\n return tile_csv_px_xyr\n\n\ndef make_comparison_plot(img_fn, coords, csv_px_xyr, rpx_min=7.9, rpx_max=\n 138.2, save_fn=None, SAVE=True, SHOW=False):\n img = cv2.imread(img_fn)\n crater_array = np.copy(coords)\n from_csv = np.copy(csv_px_xyr)\n counter = 0\n for i in range(len(from_csv)):\n x_ctr = from_csv[i][0]\n y_ctr = from_csv[i][1]\n r = from_csv[i][2]\n brightness = 255\n thick = 8\n if r < rpx_max and r > rpx_min:\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 0, 255), thick)\n counter = counter + 1\n print(counter)\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 8\n cv2.circle(img, (x_ctr, y_ctr), r, (0, 255, 0), int(thick / 2))\n if SHOW or SAVE:\n plt.imshow(img)\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '.png')\n if SHOW:\n plt.show()\n plt.imshow(img[0:2048, 0:2048, :])\n plt.gcf().set_size_inches((12, 12))\n plt.xticks([]), plt.yticks([])\n if SAVE:\n plt.savefig(save_fn + '_zoom' + '.png')\n if SHOW:\n plt.show()\n return counter\n\n\n<function token>\n\n\ndef run_some_tiles(mt, run_list):\n mt.logger.info('Running SOME tiles: ' + str(run_list))\n for i in run_list:\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + '_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n",
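`template_match_t2c` returns `stats = [N_match, N_csv, N_detect, maxr]`, and the driver functions only print the recall-like ratio `N_match / craters_in_range`; precision and F1 follow directly from the same stats. The helper below is an added convenience, not part of the dumped module:

```python
# Illustrative scores derived from the stats list of template_match_t2c.
def detection_scores(stats):
    N_match, N_csv, N_detect, _maxr = stats
    precision = N_match / N_detect if N_detect else 0.0  # matched fraction of detections
    recall = N_match / N_csv if N_csv else 0.0           # matched fraction of csv craters
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall else 0.0)
    return precision, recall, f1
```

Two quirks of `template_match_t2c` as dumped: the guard `if len(templ_coords > 0):` has a misplaced parenthesis (for a NumPy array it still evaluates to the intended truthiness, but it reads as a bug), and the fallback call to `template_match_t(...)` is unqualified, so it only works if a module-level function of that name exists alongside the method.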
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\ndef get_subset_ha(csv_arr_px, minr_px=6, maxr_px=140):\n csv_sub = np.copy(csv_arr_px)\n np.sort(csv_sub, axis=0)\n\n\n<function token>\n\n\ndef grab_files_list(path, verbose):\n arr = glob.glob(path)\n arr.sort()\n if verbose:\n print(len(arr))\n print(arr)\n return arr\n\n\n<function token>\n<function token>\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0,\n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n sx = start_x\n sy = start_y\n swh = wid_ht\n plt.subplot(2, 2, 1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx + swh, sy:sy + swh])\n 
plt.subplot(2, 2, 4)\n plt.title('Crater Finder Output')\n plt.imshow(find_image[sx:sx + swh, sy:sy + swh])\n plt.gcf().set_size_inches((12, 12))\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n if SHOW:\n plt.show()\n\n\ndef make_csv_px_array(csv_px_fn):\n tile_csv = pd.read_csv(csv_px_fn)\n tile_csv_px = tile_csv.as_matrix(columns=tile_csv.columns[3:6])\n print(tile_csv_px)\n tile_csv_px_xyr = np.copy(tile_csv_px)\n tile_csv_px_xyr[:, [0, 1, 2]] = tile_csv_px_xyr[:, [1, 0, 2]]\n print(tile_csv_px_xyr)\n return tile_csv_px_xyr\n\n\n<function token>\n<function token>\n\n\ndef run_some_tiles(mt, run_list):\n mt.logger.info('Running SOME tiles: ' + str(run_list))\n for i in run_list:\n mt.logger.info('Starting processing for Tile ' + '{:02}'.format(i))\n mt.logger.info('CSV, human annotated: ' + mt.csv_hu_arr[i])\n mt.logger.info('Data: ' + mt.data_arr[i])\n mt.logger.info('Target: ' + mt.targ_arr[i])\n print('\\n\\n\\n\\n')\n print(mt.csv_hu_arr[i], mt.data_arr[i], mt.targ_arr[i], '\\n', sep=\n ' \\n ')\n data_fn = mt.data_arr[i]\n targ_fn = mt.targ_arr[i]\n csv_px_fn = mt.csv_hu_arr[i]\n tile_pred, coords = mt.run_match_one_tile(data_fn, targ_fn, csv_px_fn)\n stats, err, frac_dupes, templ_coords, csv_px_xyr = (mt.\n run_compare_one_tile(csv_px_fn, tile_pred, coords))\n sv_fn = 'plots/found/Tile_' + '{:02}'.format(i\n ) + '_' + mt.version + '_' + get_time() + '_match_comparison'\n craters_in_range = make_comparison_plot(data_fn, coords, csv_px_xyr,\n save_fn=sv_fn)\n mt.logger.info('Saved comparison plot: ' + sv_fn)\n print('Matches Ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Matches: ' + str(\n stats[0]))\n mt.logger.info('Tile ' + '{:02}'.format(i) + ' Craters_in_range: ' +\n str(craters_in_range))\n mt.logger.info('Tile ' + '{:02}'.format(i) +\n ' Matches ratio (matches/craters_in_range): ' + str(stats[0] /\n craters_in_range))\n print('Done at: ' + get_time())\n mt.logger.info('Done with Tile ' + '{:02}'.format(i))\n mt.logger.info(' ')\n print('\\n\\n\\n\\n')\n",
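The core of template_match_t in the entry above is a loop over ring radii: draw a ring template one radius wide, correlate it against the binarized prediction with scikit-image's match_template, and keep every peak above template_thresh. A self-contained sketch on synthetic data (the circle at (128, 128) is invented for the demo, not taken from the pipeline):

    import numpy as np
    import cv2
    from skimage.feature import match_template

    # Fake binarized CNN output containing a single ring.
    target = np.zeros((256, 256))
    cv2.circle(target, (128, 128), 30, 1, 8)

    r, rw, template_thresh = 30, 8, 0.5
    n = 2 * (r + rw + 1)                     # template just large enough for the ring
    template = np.zeros((n, n))
    cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)
    result = match_template(target, template, pad_input=True)  # same-size correlation map
    ys, xs = np.where(result > template_thresh)
    print(list(zip(xs, ys)))                 # candidate (x, y) centers at this radius

np.where returns (row, col) pairs, which is why the entry appends [c[1], c[0], r] to store x before y.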
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\ndef get_subset_ha(csv_arr_px, minr_px=6, maxr_px=140):\n csv_sub = np.copy(csv_arr_px)\n np.sort(csv_sub, axis=0)\n\n\n<function token>\n\n\ndef grab_files_list(path, verbose):\n arr = glob.glob(path)\n arr.sort()\n if verbose:\n print(len(arr))\n print(arr)\n return arr\n\n\n<function token>\n<function token>\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0,\n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n sx = start_x\n sy = start_y\n swh = wid_ht\n plt.subplot(2, 2, 1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx + swh, sy:sy + swh])\n 
plt.subplot(2, 2, 4)\n plt.title('Crater Finder Output')\n plt.imshow(find_image[sx:sx + swh, sy:sy + swh])\n plt.gcf().set_size_inches((12, 12))\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n if SHOW:\n plt.show()\n\n\ndef make_csv_px_array(csv_px_fn):\n tile_csv = pd.read_csv(csv_px_fn)\n tile_csv_px = tile_csv.as_matrix(columns=tile_csv.columns[3:6])\n print(tile_csv_px)\n tile_csv_px_xyr = np.copy(tile_csv_px)\n tile_csv_px_xyr[:, [0, 1, 2]] = tile_csv_px_xyr[:, [1, 0, 2]]\n print(tile_csv_px_xyr)\n return tile_csv_px_xyr\n\n\n<function token>\n<function token>\n<function token>\n",
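Both template_match_t and template_match_t2c in these entries reuse one criterion: two circles count as the same crater (a duplicate, or a detection-to-CSV match) when the center distance scaled by the smaller radius squared stays under longlat_thresh2 and the fractional radius difference stays under rad_thresh. A small sketch with invented circles:

    import numpy as np

    longlat_thresh2, rad_thresh = 1.8, 1.0  # tuned values from the entries above

    def is_duplicate(c1, c2):
        (x1, y1, r1), (x2, y2, r2) = c1, c2
        minr = min(r1, r2)
        dL = ((x1 - x2) ** 2 + (y1 - y2) ** 2) / minr ** 2   # scaled center offset
        dR = abs(r1 - r2) / minr                             # fractional radius offset
        return dL < longlat_thresh2 and dR < rad_thresh

    print(is_duplicate((100, 100, 20), (105, 98, 22)))   # True: same crater
    print(is_duplicate((100, 100, 20), (400, 400, 20)))  # False: far apart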
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\n<function token>\n<function token>\n\n\ndef grab_files_list(path, verbose):\n arr = glob.glob(path)\n arr.sort()\n if verbose:\n print(len(arr))\n print(arr)\n return arr\n\n\n<function token>\n<function token>\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0,\n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n sx = start_x\n sy = start_y\n swh = wid_ht\n plt.subplot(2, 2, 1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 4)\n plt.title('Crater Finder Output')\n plt.imshow(find_image[sx:sx + swh, sy:sy + 
swh])\n plt.gcf().set_size_inches((12, 12))\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n if SHOW:\n plt.show()\n\n\ndef make_csv_px_array(csv_px_fn):\n tile_csv = pd.read_csv(csv_px_fn)\n tile_csv_px = tile_csv.as_matrix(columns=tile_csv.columns[3:6])\n print(tile_csv_px)\n tile_csv_px_xyr = np.copy(tile_csv_px)\n tile_csv_px_xyr[:, [0, 1, 2]] = tile_csv_px_xyr[:, [1, 0, 2]]\n print(tile_csv_px_xyr)\n return tile_csv_px_xyr\n\n\n<function token>\n<function token>\n<function token>\n",
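template_match_t2c in these entries walks the detections, removes each matched CSV row so it cannot match twice, and accumulates fractional center/radius errors that are finally divided by N_match. A toy walk-through of that bookkeeping with two invented detections and two invented CSV rows:

    import numpy as np

    detections = np.array([[100, 100, 20], [300, 50, 12]])  # invented (x, y, r)
    csv = np.array([[102, 99, 21], [500, 500, 30]])         # invented annotations
    longlat_thresh2, rad_thresh = 1.8, 1.0
    N_match, err_r = 0, 0.0
    for lo, la, r in detections:
        Long, Lat, Rad = csv.T
        minr = np.minimum(r, Rad)
        dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2
        dR = np.abs(Rad - r) / minr
        index = (dR < rad_thresh) & (dL < longlat_thresh2)
        hits = np.where(index)[0]
        if len(hits) >= 1:
            R = csv[hits[0]][2]
            err_r += abs(R - r) / ((R + r) / 2.0)  # fractional radius error
            N_match += 1
        csv = csv[~index]                          # matched rows cannot match again
        if len(csv) == 0:
            break
    print(N_match, err_r / max(N_match, 1))        # 1 match, ~0.049 mean radius error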
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0,\n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n sx = start_x\n sy = start_y\n swh = wid_ht\n plt.subplot(2, 2, 1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 4)\n plt.title('Crater Finder Output')\n plt.imshow(find_image[sx:sx + swh, sy:sy + swh])\n plt.gcf().set_size_inches((12, 12))\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n if 
SHOW:\n plt.show()\n\n\ndef make_csv_px_array(csv_px_fn):\n tile_csv = pd.read_csv(csv_px_fn)\n tile_csv_px = tile_csv.as_matrix(columns=tile_csv.columns[3:6])\n print(tile_csv_px)\n tile_csv_px_xyr = np.copy(tile_csv_px)\n tile_csv_px_xyr[:, [0, 1, 2]] = tile_csv_px_xyr[:, [1, 0, 2]]\n print(tile_csv_px_xyr)\n return tile_csv_px_xyr\n\n\n<function token>\n<function token>\n<function token>\n",
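crater_list_to_image in these entries turns a coordinate list back into an image simply by drawing one ring per (x, y, r) row; make_comparison_plot does the same on top of the data tile. A reduced sketch with invented coordinates:

    import numpy as np
    import cv2

    crater_array = [(64, 64, 20), (160, 96, 35)]  # invented (x, y, r) detections
    craters_found_img = np.zeros((256, 256))
    for x_ctr, y_ctr, r in crater_array:
        # brightness 255, ring thickness 4, as in the entries above
        cv2.circle(craters_found_img, (x_ctr, y_ctr), r, 255, 4)
    print(int(craters_found_img.sum()))           # nonzero: rings were drawn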
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\ndef four_image(data_image, targ_image, pred_image, find_image, start_x=0,\n start_y=0, wid_ht=1024, img_fn=None, SAVE=False, SHOW=True):\n sx = start_x\n sy = start_y\n swh = wid_ht\n plt.subplot(2, 2, 1)\n plt.title('Data')\n plt.imshow(data_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 2)\n plt.title('Target')\n plt.imshow(targ_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 3)\n plt.title('NN Prediction')\n plt.colorbar()\n plt.imshow(pred_image[sx:sx + swh, sy:sy + swh])\n plt.subplot(2, 2, 4)\n plt.title('Crater Finder Output')\n plt.imshow(find_image[sx:sx + swh, sy:sy + swh])\n plt.gcf().set_size_inches((12, 12))\n if SAVE and img_fn is not None:\n plt.imsave(img_fn + '.png', grid_tile)\n if 
SHOW:\n plt.show()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
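The step rows above all carry the same template_match_t routine, whose core operation is sliding a ring-shaped template across the binarized CNN output with scikit-image's match_template. Below is a minimal sketch of that single-radius step; detect_rings is an illustrative name (the full routine additionally loops the radius over np.arange(minrad, maxrad + 1) and pools the candidates), and numpy, cv2, and skimage are assumed exactly as in the rows.

import numpy as np
import cv2
from skimage.feature import match_template

def detect_rings(binary_target, radius, ring_width=8, template_thresh=0.5):
    # Draw a ring of the given radius into a square template, then slide it
    # over the 0/1 target; correlation peaks above template_thresh are
    # candidate crater centers at this radius.
    n = 2 * (radius + ring_width + 1)
    template = np.zeros((n, n))
    cv2.circle(template, (n // 2, n // 2), radius, 1, ring_width)
    corr = match_template(binary_target, template, pad_input=True)
    ys, xs = np.where(corr > template_thresh)  # np.where yields (row, col)
    return [(x, y, radius, corr[y, x]) for x, y in zip(xs, ys)]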
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef crater_list_to_image(crater_array, img_size=2048):\n craters_found_img = np.zeros((img_size, img_size))\n for i in range(len(crater_array)):\n x_ctr = crater_array[i][0]\n y_ctr = crater_array[i][1]\n r = crater_array[i][2]\n brightness = 255\n thick = 4\n cv2.circle(craters_found_img, (x_ctr, y_ctr), r, brightness, thick)\n plt.gcf().set_size_inches((12, 12))\n plt.imshow(craters_found_img)\n plt.show()\n return craters_found_img\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
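Each detection pass produces many near-coincident rings, and the while loop inside template_match_t collapses them using the two criteria quoted in the docstrings: dL < longlat_thresh2 on squared center offset and dR < rad_thresh on fractional radius difference, both normalized by the smaller radius. A hedged stand-alone sketch of that suppression step follows; unlike the flattened row, it also refilters the correlation scores so they stay aligned with the surviving coordinates.

import numpy as np

def remove_duplicates(coords, corr, longlat_thresh2=1.8, rad_thresh=1.0):
    # coords: (N, 3) array of (x, y, r); corr: (N,) match scores.
    coords = np.asarray(coords, dtype=float)
    corr = np.asarray(corr, dtype=float)
    i = 0
    while i < len(coords):
        x, y, r = coords[i]
        minr = np.minimum(r, coords[:, 2])
        dL = ((coords[:, 0] - x) ** 2 + (coords[:, 1] - y) ** 2) / minr ** 2
        dR = np.abs(coords[:, 2] - r) / minr
        dup = (dR < rad_thresh) & (dL < longlat_thresh2)  # always includes i
        if dup.sum() > 1:
            best = np.argmax(np.where(dup, corr, -np.inf))
            coords[i], corr[i] = coords[best], corr[best]  # keep best in slot i
            dup[i] = False
            coords, corr = coords[~dup], corr[~dup]
        i += 1
    return coords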
"<import token>\n\n\nclass Match_Tiles(object):\n \"\"\"\n Match_Tiles is a test harness for running a variety of matching modifications quickly\n __init__: \n version is looking for a string slug for the model, to be used in file names\n data_path is the string that will be passed to the glob function to grab files\n targ_path is the string that will be passed to the glob function to grab files\n VERBOSE is set to True by default, used for debugging, also used to decide whether images\n are printed to the screen\n \"\"\"\n \"\"\"\n Tuned Crater Detection Hyperparameters\n --------------------------------------\n minrad, maxrad : ints\n radius range in match_template to search over.\n longlat_thresh2, rad_thresh : floats\n if ((x1-x2)^2 + (y1-y2)^2) / min(r1,r2)^2 < longlat_thresh2 and\n abs(r1-r2) / min(r1,r2) < rad_thresh, remove (x2,y2,r2) circle (it is\n a duplicate of another crater candidate). In addition, when matching\n CNN-detected rings to corresponding csvs (i.e. template_match_t2c),\n the same criteria is used to determine a match.\n template_thresh : float\n 0-1 range. If match_template probability > template_thresh, count as \n detection.\n target_thresh : float\n 0-1 range. target[target > target_thresh] = 1, otherwise 0\n rw : int\n 1-32 range. Ring width, thickness of the rings used to match craters.\n \"\"\"\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n 
self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
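template_match_t2c in the rows above compares detections against the human-counted CSV craters with the same match criteria, consuming each matched CSV entry so it cannot satisfy two detections. A compact sketch of that bookkeeping is below, assuming (x, y, r) pixel arrays; score_detections is an illustrative name, and the duplicate-fraction and out-of-range CSV handling of the full routine are omitted.

import numpy as np

def score_detections(dets, truth, longlat_thresh2=1.8, rad_thresh=1.0):
    # Returns the match count and mean fractional x/y/r errors; matched
    # ground-truth rows are removed so each can match one detection only.
    truth = np.asarray(truth, dtype=float).copy()
    n_match, errs = 0, np.zeros(3)
    for x, y, r in np.asarray(dets, dtype=float):
        if len(truth) == 0:
            break
        minr = np.minimum(r, truth[:, 2])
        dL = ((truth[:, 0] - x) ** 2 + (truth[:, 1] - y) ** 2) / minr ** 2
        dR = np.abs(truth[:, 2] - r) / minr
        hit = (dR < rad_thresh) & (dL < longlat_thresh2)
        if hit.any():
            tx, ty, tr = truth[np.where(hit)[0][0]]
            meanr = (tr + r) / 2.0
            errs += np.abs([tx - x, ty - y, tr - r]) / meanr
            n_match += 1
        truth = truth[~hit]
    return n_match, (errs / n_match if n_match else errs)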
"<import token>\n\n\nclass Match_Tiles(object):\n <docstring token>\n <docstring token>\n longlat_thresh2_ = 1.8\n rad_thresh_ = 1.0\n template_thresh_ = 0.5\n minrad_ = 6\n maxrad_ = 140\n target_thresh_ = 0.1\n rw_ = 8\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, 
rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search 
target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
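Before any ring matching happens, the prediction is hard-thresholded, which is why every caller in these rows passes np.copy(tile_pred): template_match_t binarizes its target argument in place. A tiny sketch that makes the copy explicit:

import numpy as np

def binarize_target(pred, target_thresh=0.1):
    # target[target >= t] = 1, otherwise 0, exactly as the docstrings
    # describe; copying first leaves the raw CNN output untouched.
    out = np.copy(pred)
    out[out >= target_thresh] = 1
    out[out < target_thresh] = 0
    return out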
"<import token>\n\n\nclass Match_Tiles(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n\n def run_match_one_tile(self, data_fn, targ_fn, csv_px_fn):\n data = []\n target = []\n loadIm(data_fn, targ_fn, data, target, step=512, newpx=512, px=7680)\n data = 2 * np.array(data) - 1\n target = np.array(target)\n print(data.shape)\n mod = self.model\n print('Model loaded at: ' + get_time())\n self.logger.info('Model loaded at: ' + get_time())\n outs = mod.predict(data)\n print('Prediction finished at: ' + get_time())\n self.logger.info('Prediction finished at: ' + get_time())\n tile_pred = remake_tile(outs, tile_size=7680, SHOW=False)\n tile_data = remake_tile(data, tile_size=7680, SHOW=False)\n tile_targ = remake_tile(target, tile_size=7680, SHOW=False)\n print('Tiles put back together at: ' + get_time())\n self.logger.info('Tiles put back together at: ' + get_time())\n copy_tile_pred = np.copy(tile_pred)\n tile_crater_coords = self.template_match_t(copy_tile_pred, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.rw)\n print('Coordinates determined from prediction at: ' + get_time())\n self.logger.info('Coordinates determined from prediction at: ' +\n get_time())\n tile_found = crater_list_to_image(tile_crater_coords, img_size=7680)\n print('Crater list in new image finished at: ' + get_time())\n self.logger.info('Crater list in new image finished at: ' + get_time())\n four_image(tile_data, tile_targ, tile_pred, tile_found, start_x=0,\n start_y=0, wid_ht=1024)\n return tile_pred, tile_crater_coords\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n 
longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum 
ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
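The earlier, less-abstracted rows also carry a crater_list_to_image helper that rasterizes the (x, y, r) detections back into an image for visual comparison against the target. A minimal version of that drawing step, without the matplotlib display calls:

import numpy as np
import cv2

def craters_to_image(craters, img_size=2048, thickness=4):
    # One bright ring per detection; cv2.circle wants integer center/radius.
    canvas = np.zeros((img_size, img_size), dtype=np.uint8)
    for x, y, r in craters:
        cv2.circle(canvas, (int(x), int(y)), int(r), 255, thickness)
    return canvas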
"<import token>\n\n\nclass Match_Tiles(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n <function token>\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 
0-1. All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n\n def template_match_t2c(self, target, csv_coords, templ_coords=None,\n minrad=minrad_, maxrad=maxrad_, longlat_thresh2=longlat_thresh2_,\n rad_thresh=rad_thresh_, template_thresh=template_thresh_,\n target_thresh=target_thresh_, rw=rw_, rmv_oor_csvs=0):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target and\n compares the resulting detections to the corresponding human-counted crater\n data.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n csv_coords : array\n Human-counted crater coordinates (in pixel units).\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n rmv_oor_csvs : boolean, flag\n If set to 1, remove craters from the csv that are outside your\n detectable range.\n Returns\n -------\n N_match : int\n Number of crater matches between your target and csv.\n N_csv : int\n Number of csv entries\n N_detect : int\n Total number of detected craters from target.\n maxr : int\n Radius of largest crater extracted from target.\n err_lo : float\n Mean longitude error between detected craters and csvs.\n err_la : float\n Mean latitude error between detected craters and csvs.\n err_r : float\n Mean radius error between detected craters and csvs.\n frac_dupes : float\n Fraction of craters with multiple csv matches.\n \"\"\"\n if templ_coords is None:\n templ_coords = template_match_t(target, minrad, maxrad,\n longlat_thresh2, rad_thresh, template_thresh, target_thresh, rw\n )\n else:\n print('Found craters: ' + str(len(templ_coords)))\n self.logger.info('Found craters: ' + str(len(templ_coords)))\n maxr = 0\n if len(templ_coords > 0):\n maxr = np.max(templ_coords.T[2])\n N_match = 0\n frac_dupes = 0\n err_lo, err_la, err_r = 0, 0, 0\n N_csv, N_detect = len(csv_coords), len(templ_coords)\n for lo, la, r in templ_coords:\n Long, Lat, Rad = csv_coords.T\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n index_True = np.where(index == True)[0]\n N = len(index_True)\n if N >= 1:\n Lo, La, R = csv_coords[index_True[0]].T\n meanr = (R + r) / 2.0\n err_lo += abs(Lo - lo) / meanr\n err_la += abs(La - la) / meanr\n err_r += abs(R - r) / meanr\n if N > 1:\n frac_dupes += (N - 1) / float(len(templ_coords))\n N_match += min(1, N)\n csv_coords = csv_coords[np.where(index == False)]\n if len(csv_coords) == 0:\n break\n if rmv_oor_csvs == 1:\n upper = 15\n lower = minrad_\n N_large_unmatched = len(np.where((csv_coords.T[2] > upper) | (\n csv_coords.T[2] < lower))[0])\n if N_large_unmatched < N_csv:\n N_csv -= N_large_unmatched\n if N_match >= 1:\n err_lo = err_lo / N_match\n err_la = err_la / N_match\n err_r = err_r / N_match\n stats = [N_match, N_csv, N_detect, maxr]\n err = [err_lo, err_la, err_r]\n return stats, err, frac_dupes, templ_coords\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
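run_match_one_tile in the earlier rows predicts on 512 px patches of a 7680 px tile and reassembles them through helpers (loadIm, remake_tile) whose bodies are not included in this dump. The sketch below shows the implied non-overlapping split-and-stitch, under the assumption that patches are stitched back in the same row-major order they were cut; both function names are illustrative.

import numpy as np

def split_tile(tile, patch=512):
    # Cut a square tile into non-overlapping patch x patch pieces,
    # row-major, the order stitch_tile expects back.
    px = tile.shape[0]
    return np.array([tile[y:y + patch, x:x + patch]
                     for y in range(0, px, patch)
                     for x in range(0, px, patch)])

def stitch_tile(patches, tile_size=7680):
    patch = patches.shape[1]
    per_row = tile_size // patch
    out = np.zeros((tile_size, tile_size), dtype=patches.dtype)
    for i, p in enumerate(patches):
        y, x = divmod(i, per_row)
        out[y * patch:(y + 1) * patch, x * patch:(x + 1) * patch] = p
    return out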
"<import token>\n\n\nclass Match_Tiles(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n <function token>\n\n def run_compare_one_tile(self, csv_px_fn, tile_pred, list_coords=None):\n csv_px_xyr = make_csv_px_array(csv_px_fn)\n csv_coords = np.copy(csv_px_xyr)\n copy_tile_pred = np.copy(tile_pred)\n stats, err, frac_dupes, templ_coords = self.template_match_t2c(\n copy_tile_pred, csv_coords, templ_coords=list_coords, minrad=\n self.minr_px, maxrad=self.maxr_px, longlat_thresh2=self.\n longlat_thresh2_, rad_thresh=self.rad_thresh_, template_thresh=\n self.template_thresh_, target_thresh=self.targ_thresh, rw=self.\n rw, rmv_oor_csvs=0)\n N_match, N_csv, N_detect, maxr = stats\n err_lo, err_la, err_r = err\n print('Number of matches: ' + str(N_match))\n print('Number of csv entries: ' + str(N_csv))\n print('Number of detected craters: ' + str(N_detect))\n print('Max radius: ' + str(maxr))\n print('err_lo: ' + str(err_lo))\n print('err_la: ' + str(err_la))\n print('err_r: ' + str(err_r))\n print('frac_dupes: ' + str(frac_dupes))\n return stats, err, frac_dupes, templ_coords, csv_px_xyr\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 
0-1. All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Match_Tiles(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, model_version, model_path, data_path, targ_path,\n csv_path, rw=8, minrpx=7, maxrpx=140, tt=0.4, RANDOMIZED=True,\n VERBOSE=True, log_str=''):\n self.longlat_thresh2_ = 1.8\n self.rad_thresh_ = 1.0\n self.template_thresh_ = 0.5\n self.minrad_ = 6\n self.maxrad_ = 140\n self.target_thresh_ = 0.1\n self.rw_ = 8\n self.version = model_version\n self.verbose = VERBOSE\n self.data_arr = grab_files_list(data_path, self.verbose)\n self.targ_arr = grab_files_list(targ_path, self.verbose)\n self.csv_hu_arr = grab_files_list(csv_path, self.verbose)\n self.model = load_model(model_path)\n self.coords_arr = None\n self.rw = rw\n self.minr_px = minrpx\n self.maxr_px = maxrpx\n self.targ_thresh = tt\n logger_name = 'test_log' + get_time()\n self.logger = logging.getLogger(logger_name)\n hdlr = logging.FileHandler('log/match_test_log_' + str(\n model_version) + '_' + get_time() + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.logger.info('New log file created for this test: ' + model_version\n )\n self.logger.info('Model path: ' + model_path)\n self.logger.info('Data path: ' + data_path)\n self.logger.info('Target path: ' + targ_path)\n self.logger.info('CSV path: ' + csv_path)\n <function token>\n <function token>\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. 
All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Match_Tiles(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def template_match_t(self, target, minrad=minrad_, maxrad=maxrad_,\n longlat_thresh2=longlat_thresh2_, rad_thresh=rad_thresh_,\n template_thresh=template_thresh_, target_thresh=target_thresh_, rw=rw_\n ):\n \"\"\"Extracts crater coordinates (in pixels) from a CNN-predicted target by\n iteratively sliding rings through the image via match_template from\n scikit-image.\n Parameters\n ----------\n target : array\n CNN-predicted target.\n minrad : integer\n Minimum ring radius to search target over.\n maxrad : integer\n Maximum ring radius to search target over.\n longlat_thresh2 : float\n Minimum squared longitude/latitude difference between craters to be\n considered distinct detections.\n rad_thresh : float\n Minimum fractional radius difference between craters to be considered\n distinct detections.\n template_thresh : float\n Minimum match_template correlation coefficient to count as a detected\n crater.\n target_thresh : float\n Value between 0-1. All pixels > target_thresh are set to 1, and\n otherwise set to 0.\n Returns\n -------\n coords : array\n Pixel coordinates of successfully detected craters in predicted target.\n \"\"\"\n target[target >= target_thresh] = 1\n target[target < target_thresh] = 0\n radii = np.arange(minrad, maxrad + 1, 1, dtype=int)\n coords = []\n corr = []\n for r in radii:\n n = 2 * (r + rw + 1)\n template = np.zeros((n, n))\n cv2.circle(template, (r + rw + 1, r + rw + 1), r, 1, rw)\n result = match_template(target, template, pad_input=True)\n index_r = np.where(result > template_thresh)\n coords_r = np.asarray(list(zip(*index_r)))\n corr_r = np.asarray(result[index_r])\n if len(coords_r) > 0:\n for c in coords_r:\n coords.append([c[1], c[0], r])\n for l in corr_r:\n corr.append(np.abs(l))\n coords, corr = np.asarray(coords), np.asarray(corr)\n i, N = 0, len(coords)\n while i < N:\n Long, Lat, Rad = coords.T\n lo, la, r = coords[i]\n minr = np.minimum(r, Rad)\n dL = ((Long - lo) ** 2 + (Lat - la) ** 2) / minr ** 2\n dR = abs(Rad - r) / minr\n index = (dR < rad_thresh) & (dL < longlat_thresh2)\n if len(np.where(index == True)[0]) > 1:\n coords_i = coords[np.where(index == True)]\n corr_i = corr[np.where(index == True)]\n coords[i] = coords_i[corr_i == np.max(corr_i)][0]\n index[i] = False\n coords = coords[np.where(index == False)]\n N, i = len(coords), i + 1\n return coords\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Match_Tiles(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,480 |
001360bdaacac925ff5b515a5d81f1883bd2bf40
|
from django.conf.urls import url
from django.views.generic import TemplateView
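
# Static pages only: each route renders its template directly through
# TemplateView, so no custom view functions are needed.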
urlpatterns = [
url(r'^about$', TemplateView.as_view(
template_name="about.html"),
name="about"),
url(r'^method$', TemplateView.as_view(
template_name="method.html"),
name="method"),
url(r'^kj$', TemplateView.as_view(
template_name="kj.html"),
name="kj"),
url(r'^team$', TemplateView.as_view(
template_name="team.html"),
name="team"),
url(r'^partners$', TemplateView.as_view(
template_name="partners.html"),
name="partners"),
url(r'^friends$', TemplateView.as_view(
template_name="m_friends.html"),
name="friends"),
]
|
[
"from django.conf.urls import url\nfrom django.views.generic import TemplateView\n\nurlpatterns = [\n url(r'^about$', TemplateView.as_view(\n template_name=\"about.html\"),\n name=\"about\"),\n url(r'^method$', TemplateView.as_view(\n template_name=\"method.html\"),\n name=\"method\"),\n url(r'^kj$', TemplateView.as_view(\n template_name=\"kj.html\"),\n name=\"kj\"),\n url(r'^team$', TemplateView.as_view(\n template_name=\"team.html\"),\n name=\"team\"),\n url(r'^partners$', TemplateView.as_view(\n template_name=\"partners.html\"),\n name=\"partners\"),\n url(r'^friends$', TemplateView.as_view(\n template_name=\"m_friends.html\"),\n name=\"friends\"),\n]\n",
"from django.conf.urls import url\nfrom django.views.generic import TemplateView\nurlpatterns = [url('^about$', TemplateView.as_view(template_name=\n 'about.html'), name='about'), url('^method$', TemplateView.as_view(\n template_name='method.html'), name='method'), url('^kj$', TemplateView.\n as_view(template_name='kj.html'), name='kj'), url('^team$',\n TemplateView.as_view(template_name='team.html'), name='team'), url(\n '^partners$', TemplateView.as_view(template_name='partners.html'), name\n ='partners'), url('^friends$', TemplateView.as_view(template_name=\n 'm_friends.html'), name='friends')]\n",
"<import token>\nurlpatterns = [url('^about$', TemplateView.as_view(template_name=\n 'about.html'), name='about'), url('^method$', TemplateView.as_view(\n template_name='method.html'), name='method'), url('^kj$', TemplateView.\n as_view(template_name='kj.html'), name='kj'), url('^team$',\n TemplateView.as_view(template_name='team.html'), name='team'), url(\n '^partners$', TemplateView.as_view(template_name='partners.html'), name\n ='partners'), url('^friends$', TemplateView.as_view(template_name=\n 'm_friends.html'), name='friends')]\n",
"<import token>\n<assignment token>\n"
] | false |
98,481 |
210c5d7ffbdd48ac19ad79b9d99265bd9c2ddcf2
|
from django.conf import settings
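
# pytest hook: called once at test-session start-up; configures a minimal
# Django environment in code, so no separate settings module is needed.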
def pytest_configure():
settings.configure(
DEBUG=True,
TEMPLATE_DEBUG=True,
DEBUG_PROPAGATE_EXCEPTIONS=True,
ALLOWED_HOSTS=['*'],
ADMINS=(),
MANAGERS=(),
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
},
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
},
TIME_ZONE='UTC',
LANGUAGE_CODE='en-uk',
SITE_ID=1,
USE_I18N=True,
USE_L10N=True,
MEDIA_ROOT='',
MEDIA_URL='',
SECRET_KEY='u@x-aj9(hoh#rb-^ymf#g2jx_hp0vj7u5#b@ag1n^seu9e!%cy',
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
ROOT_URLCONF='testapp.urls',
TEMPLATE_DIRS=(),
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'rest_framework_nested',
'testapp',
),
STATIC_URL='/static/',
PASSWORD_HASHERS=(
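            # fast, insecure hashers (SHA1/MD5) are fine for tests and keep user creation quick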
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
),
AUTH_USER_MODEL='auth.User'
)
|
[
"from django.conf import settings\n\n\ndef pytest_configure():\n settings.configure(\n DEBUG=True,\n TEMPLATE_DEBUG=True,\n DEBUG_PROPAGATE_EXCEPTIONS=True,\n ALLOWED_HOSTS=['*'],\n ADMINS=(),\n MANAGERS=(),\n DATABASES={\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': '',\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n },\n CACHES={\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n }\n },\n TIME_ZONE='UTC',\n LANGUAGE_CODE='en-uk',\n SITE_ID=1,\n USE_I18N=True,\n USE_L10N=True,\n MEDIA_ROOT='',\n MEDIA_URL='',\n SECRET_KEY='u@x-aj9(hoh#rb-^ymf#g2jx_hp0vj7u5#b@ag1n^seu9e!%cy',\n TEMPLATE_LOADERS=(\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ),\n MIDDLEWARE_CLASSES=(\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n ),\n ROOT_URLCONF='testapp.urls',\n TEMPLATE_DIRS=(),\n INSTALLED_APPS=(\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'rest_framework_nested',\n 'testapp',\n ),\n STATIC_URL='/static/',\n PASSWORD_HASHERS=(\n 'django.contrib.auth.hashers.SHA1PasswordHasher',\n 'django.contrib.auth.hashers.PBKDF2PasswordHasher',\n 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',\n 'django.contrib.auth.hashers.BCryptPasswordHasher',\n 'django.contrib.auth.hashers.MD5PasswordHasher',\n 'django.contrib.auth.hashers.CryptPasswordHasher',\n ),\n AUTH_USER_MODEL='auth.User'\n )\n",
"from django.conf import settings\n\n\ndef pytest_configure():\n settings.configure(DEBUG=True, TEMPLATE_DEBUG=True,\n DEBUG_PROPAGATE_EXCEPTIONS=True, ALLOWED_HOSTS=['*'], ADMINS=(),\n MANAGERS=(), DATABASES={'default': {'ENGINE':\n 'django.db.backends.sqlite3', 'NAME': '', 'USER': '', 'PASSWORD':\n '', 'HOST': '', 'PORT': ''}}, CACHES={'default': {'BACKEND':\n 'django.core.cache.backends.locmem.LocMemCache'}}, TIME_ZONE='UTC',\n LANGUAGE_CODE='en-uk', SITE_ID=1, USE_I18N=True, USE_L10N=True,\n MEDIA_ROOT='', MEDIA_URL='', SECRET_KEY=\n 'u@x-aj9(hoh#rb-^ymf#g2jx_hp0vj7u5#b@ag1n^seu9e!%cy',\n TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader'),\n MIDDLEWARE_CLASSES=('django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware'),\n ROOT_URLCONF='testapp.urls', TEMPLATE_DIRS=(), INSTALLED_APPS=(\n 'django.contrib.auth', 'django.contrib.contenttypes',\n 'django.contrib.sessions', 'django.contrib.sites',\n 'django.contrib.messages', 'rest_framework_nested', 'testapp'),\n STATIC_URL='/static/', PASSWORD_HASHERS=(\n 'django.contrib.auth.hashers.SHA1PasswordHasher',\n 'django.contrib.auth.hashers.PBKDF2PasswordHasher',\n 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',\n 'django.contrib.auth.hashers.BCryptPasswordHasher',\n 'django.contrib.auth.hashers.MD5PasswordHasher',\n 'django.contrib.auth.hashers.CryptPasswordHasher'), AUTH_USER_MODEL\n ='auth.User')\n",
"<import token>\n\n\ndef pytest_configure():\n settings.configure(DEBUG=True, TEMPLATE_DEBUG=True,\n DEBUG_PROPAGATE_EXCEPTIONS=True, ALLOWED_HOSTS=['*'], ADMINS=(),\n MANAGERS=(), DATABASES={'default': {'ENGINE':\n 'django.db.backends.sqlite3', 'NAME': '', 'USER': '', 'PASSWORD':\n '', 'HOST': '', 'PORT': ''}}, CACHES={'default': {'BACKEND':\n 'django.core.cache.backends.locmem.LocMemCache'}}, TIME_ZONE='UTC',\n LANGUAGE_CODE='en-uk', SITE_ID=1, USE_I18N=True, USE_L10N=True,\n MEDIA_ROOT='', MEDIA_URL='', SECRET_KEY=\n 'u@x-aj9(hoh#rb-^ymf#g2jx_hp0vj7u5#b@ag1n^seu9e!%cy',\n TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader'),\n MIDDLEWARE_CLASSES=('django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware'),\n ROOT_URLCONF='testapp.urls', TEMPLATE_DIRS=(), INSTALLED_APPS=(\n 'django.contrib.auth', 'django.contrib.contenttypes',\n 'django.contrib.sessions', 'django.contrib.sites',\n 'django.contrib.messages', 'rest_framework_nested', 'testapp'),\n STATIC_URL='/static/', PASSWORD_HASHERS=(\n 'django.contrib.auth.hashers.SHA1PasswordHasher',\n 'django.contrib.auth.hashers.PBKDF2PasswordHasher',\n 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',\n 'django.contrib.auth.hashers.BCryptPasswordHasher',\n 'django.contrib.auth.hashers.MD5PasswordHasher',\n 'django.contrib.auth.hashers.CryptPasswordHasher'), AUTH_USER_MODEL\n ='auth.User')\n",
"<import token>\n<function token>\n"
] | false |
98,482 |
01955bd5532e1923b51cf15329c505ae2dafa069
|
import matplotlib.pyplot as plt
import numpy as np
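
# Figures for the apparent-age (AGE) and tropical-cyclone-intensity (TC)
# experiments: per-sample error scatters, risk-coverage curves, and the
# influence of T in MC-dropout / Blend-Var.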
def draw_dy_sample():
	# Plot f(x)-y vs. sample index
data = np.load("age_T20.npy").astype("float32") # var dy mae
dy = data[:, 1]
ID = np.arange(1, data.shape[0]+1)
	#figsize = 8,6 # 800 * 600 pixels
	figsize = 7,5 # 700 * 500 pixels (7 x 5 in at the default 100 dpi)
figure, ax = plt.subplots(figsize=figsize)
#fig = plt.gcf()
#fig.set_size_inches(6.4, 4.0)
plt.xlim(-10, 1080)
plt.ylim(-30, 42)
plt.scatter(ID,dy,s=10, c='black')
	# s: marker size
	# c: marker color; common values are 'r', 'b', 'g', 'w', ...
	# marker: marker shape; common values are '+', 'o', 'x', ...
	# set the size and font of the tick labels
plt.tick_params(labelsize=15)
	# configure the tick positions
	#new_ticks = np.linspace(0,1200,5) #plt.xticks(new_ticks)
	# relabel the ticks at the chosen positions
plt.xticks([0, 250, 500, 750, 1000])
plt.yticks([-20, 0, 20, 40])
font1={'weight':'semibold',
'size':20
}
#styles=['normal','italic','oblique']
#weights=['light','normal','medium','semibold','bold','heavy','black']
plt.xlabel("Sample", fontdict = font1, horizontalalignment='center' )#x轴上的名字
plt.ylabel("f(x)-y", fontdict = font1, verticalalignment ='center')#y轴上的名字
plt.tight_layout() #调整整体空白
plt.savefig('./Figure/age_dy_sampletmp.eps')
plt.savefig('./Figure/age_dy_sampletmp.png')
plt.show()
def TC_draw_dy_sample():
	# Plot f(x)-y vs. sample index
data = np.load("../TC/Var2015-2017/10/var-dy-random_9_30/10_val.npy").astype("float32") # var dy mae t b
dy = data[:, 1]
ID = np.arange(1, data.shape[0]+1)
	#figsize = 8,6 # 800 * 600 pixels
	figsize = 7,5 # 700 * 500 pixels (7 x 5 in at the default 100 dpi)
figure, ax = plt.subplots(figsize=figsize)
#fig = plt.gcf()
#fig.set_size_inches(6.4, 4.0)
#plt.xlim(-10, 5600)
#plt.ylim(-75, 50)
plt.scatter(ID,dy,s=10, c='black')
	# s: marker size
	# c: marker color; common values are 'r', 'b', 'g', 'w', ...
	# marker: marker shape; common values are '+', 'o', 'x', ...
	# set the size and font of the tick labels
plt.tick_params(labelsize=15)
	# configure the tick positions
	#new_ticks = np.linspace(0,1200,5) #plt.xticks(new_ticks)
	# relabel the ticks at the chosen positions
#plt.xticks([0, 1000, 2000, 3000, 4000, 5000])
#plt.yticks([-50, -25, 0, 25, 50])
font1={'weight':'semibold',
'size':20
}
#styles=['normal','italic','oblique']
#weights=['light','normal','medium','semibold','bold','heavy','black']
plt.xlabel("Sample", fontdict = font1, horizontalalignment='center' )#x轴上的名字
plt.ylabel("f(x)-y", fontdict = font1, verticalalignment ='center')#y轴上的名字
plt.tight_layout() #调整整体空白
plt.savefig('./Figure/TC_dy_sampletmp.eps')
plt.savefig('./Figure/TC_dy_sampletmp.png')
plt.show()
def TC_testmse_testb_idealmse(row, col, num, fig):
	# Risk-coverage curve on the validation set for TC intensity estimation
data = np.load("../TC/testmse_testb_idealmse.npy").astype("float32") # ID test_mse test_b, ideal_mse
#ID = data[:,0]
sumnum = data.shape[0]
ID = np.linspace(0.0020, 0.9999, 20)
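	# 20 coverage levels spanning (0, 1); ID*sumnum maps each coverage fraction to a row index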
print(ID.shape)
print(ID*sumnum)
test_mse = data[(ID*sumnum).astype(int), 1]
test_b = data[(ID*sumnum).astype(int), 2]
ideal_mse = data[(ID*sumnum).astype(int),3]
	figsize = 8,6 # 800 * 600 pixels (unused here; the caller sets the figure size)
ax = fig.add_subplot(row, col, num)#, figsize=figsize)
#fig = plt.gcf()
#fig.set_size_inches(6.4, 4.0)
#plt.xlim(-10, 5600)
#plt.ylim(-75, 50)
ax.plot(ID, test_b, linewidth=2, c='black', marker='*', markersize=8, label=r'Blend-Var $r^\star$')
ax.plot(ID, test_mse, linewidth=2, c='b', marker='^', markersize=8, label=r'Blend-Var $\hat{r}$')
ax.plot(ID, ideal_mse, linewidth=2, c='r', marker='.', markersize=8, label=r'Ideal $\hat{r}$')
	ax.axhline(y=data[int(ID[19]*sumnum), 1], ls="--",c="grey", linewidth=1) # horizontal reference: risk at full coverage
	# set the size and font of the tick labels
ax.tick_params(labelsize=15)
font1={'weight':'semibold',
'size':20
}
#styles=['normal','italic','oblique']
#weights=['light','normal','medium','semibold','bold','heavy','black']
ax.set_xlabel("Coverage", fontdict = font1 )#x轴上的名字
ax.set_ylabel("Risk (MSE)", fontdict = font1)#y轴上的名字
#ax.set_title("Risk-Coverage Curve on the Validation Set\n for TC Intensity Estimation.", fontsize = 15)
	# place the legend
	# in the upper-right corner
ax.legend(loc = 'upper right', fontsize = 15)
#plt.savefig('./Figure/TC_testmse_testb_idealmse.eps')
#plt.savefig('./Figure/TC_testmse_testb_idealmse.png')
#plt.show()
def AGE_testmse_testb_idealmse(row, col, num, fig):
	# Risk-coverage curve on the validation set for apparent age estimation
data = np.loadtxt("./val_mae_val_b_ideal_mae/valmae_valb_idealmae.csv", delimiter=",", ) # ID val_mae val_b, ideal_mae
#ID = data[:,0]
print(data.shape)
sumnum = data.shape[0]
ID = np.linspace(0.0005, 0.9999, 20)
val_mae = data[(ID*sumnum).astype(int), 1]
val_b = data[(ID*sumnum).astype(int), 2]
ideal_mae = data[(ID*sumnum).astype(int),3]
	figsize = 8,6 # 800 * 600 pixels (unused here; the caller sets the figure size)
ax = fig.add_subplot(row, col, num)#, figsize=figsize)
#fig = plt.gcf()
#fig.set_size_inches(6.4, 4.0)
#plt.xlim(-10, 5600)
#plt.ylim(-75, 50)
ax.plot(ID, val_b, linewidth=2, c='black', marker='*', markersize=8, label=r'MC-dropout $r^\star$')
ax.plot(ID, val_mae, linewidth=2, c='b', marker='^', markersize=8, label=r'MC-dropout $\hat{r}$')
ax.plot(ID, ideal_mae, linewidth=2, c='r', marker='.', markersize=8, label=r'Ideal $\hat{r}$')
	ax.axhline(y=data[int(sumnum)-1,1], ls="--",c="grey", linewidth=1) # horizontal reference: risk at full coverage
	# set the size and font of the tick labels
ax.tick_params(labelsize=15)
font1={'weight':'semibold',
'size':20
}
#styles=['normal','italic','oblique']
#weights=['light','normal','medium','semibold','bold','heavy','black']
ax.set_xlabel("Coverage", fontdict = font1 )#x轴上的名字
ax.set_ylabel("Risk (MAE)", fontdict = font1)#y轴上的名字
#ax.set_title("Risk-Coverage Curve on the Validation Set\n for Apparent Age Estimation.", fontsize = 15)
	# place the legend
	# in the lower-right corner
ax.legend(loc = 'lower right', fontsize = 15)
#plt.savefig('./Figure/AGE_testmse_testb_idealmse.eps')
#plt.savefig('./Figure/AGE_testmse_testb_idealmse.png')
#plt.show()
def AGE_diffT(row, col, num, fig):
	# Risk-coverage curves showing the influence of T in MC-dropout
data = np.load("./diffT_mae.npy").astype("float32") # ID 02 04 08 10 15 20
ID = data[:,0]
sumnum = data.shape[0]
ID = np.linspace(0.0020, 0.9999, 20)
print(ID.shape)
print(ID*sumnum)
mae_02 = data[(ID*sumnum).astype(int), 1]
mae_04 = data[(ID*sumnum).astype(int), 2]
mae_08 = data[(ID*sumnum).astype(int), 3]
mae_10 = data[(ID*sumnum).astype(int), 4]
mae_15 = data[(ID*sumnum).astype(int), 5]
mae_20 = data[(ID*sumnum).astype(int), 6]
	figsize = 8,6 # 800 * 600 pixels (unused here; the caller sets the figure size)
ax = fig.add_subplot(row, col, num)#, figsize=figsize)
#fig = plt.gcf()
#fig.set_size_inches(6.4, 4.0)
#plt.xlim(-10, 5600)
#plt.ylim(-75, 50)
ax.plot(ID, mae_02, linewidth=2, marker='.', markersize=8, label='MAE_02')
ax.plot(ID, mae_04, linewidth=2, marker='*', markersize=8, label='MAE_04')
ax.plot(ID, mae_08, linewidth=2, marker='x', markersize=8, label='MAE_08')
ax.plot(ID, mae_10, linewidth=2, marker='^', markersize=8, label='MAE_10')
#plt.plot(ID, mae_15, linewidth=2, marker='.', markersize=8, label='MAE_15')
ax.plot(ID, mae_20, linewidth=2, marker='o', markersize=8, label='MAE_20')
	ax.axhline(y=data[int(ID[19]*sumnum), 1], ls="--",c="grey", linewidth=1) # add a horizontal reference line
	# set the size and font of the tick labels
ax.tick_params(labelsize=15)
font1={'weight':'semibold',
'size':20
}
#styles=['normal','italic','oblique']
#weights=['light','normal','medium','semibold','bold','heavy','black']
ax.set_xlabel("Coverage", fontdict = font1)#x轴上的名字
ax.set_ylabel("Risk (MAE)", fontdict = font1)#y轴上的名字
#ax.set_title("the Influence of $T$ in MC-dropout.", fontsize = 15)
	# place the legend
	# in the lower-right corner
ax.legend(loc = 'lower right', fontsize = 15)
#plt.savefig('./Figure/AGE_MCdropout_T.eps')
#plt.savefig('./Figure/AGE_MCdropout_T.png')
#plt.show()
def TC_diffT(row, col, num, fig):
	# Risk-coverage curves showing the influence of T in Blend-Var
data = np.load("../TC/diffT_mse.npy").astype("float32") # ID 02 04 08 10 12 15
ID = data[:,0]
sumnum = data.shape[0]
ID = np.linspace(0.0020, 0.9999, 20)
print(ID.shape)
print(ID*sumnum)
mse_02 = data[(ID*sumnum).astype(int), 1]
mse_04 = data[(ID*sumnum).astype(int), 2]
mse_08 = data[(ID*sumnum).astype(int), 3]
mse_10 = data[(ID*sumnum).astype(int), 4]
mse_12 = data[(ID*sumnum).astype(int), 5]
mse_15 = data[(ID*sumnum).astype(int), 6]
	figsize = 8,6 # 800 * 600 pixels (unused here; the caller sets the figure size)
ax = fig.add_subplot(row, col, num)#, figsize=figsize)
#fig = plt.gcf()
#fig.set_size_inches(6.4, 4.0)
#plt.xlim(-10, 5600)
#plt.ylim(-75, 50)
ax.plot(ID, mse_02, linewidth=2, marker='.', markersize=8, label='MSE_02')
ax.plot(ID, mse_04, linewidth=2, marker='*', markersize=8, label='MSE_04')
ax.plot(ID, mse_08, linewidth=2, marker='x', markersize=8, label='MSE_08')
ax.plot(ID, mse_10, linewidth=2, marker='^', markersize=8, label='MSE_10')
#plt.plot(ID, mse_12, linewidth=2, marker='.', markersize=8, label='MSE_12')
ax.plot(ID, mse_15, linewidth=2, marker='o', markersize=8, label='MSE_15')
	ax.axhline(y=data[int(ID[19]*sumnum), 1], ls="--",c="grey", linewidth=1) # add a horizontal reference line
	# set the size and font of the tick labels
ax.tick_params(labelsize=15)
font1={'weight':'semibold',
'size':20
}
#styles=['normal','italic','oblique']
#weights=['light','normal','medium','semibold','bold','heavy','black']
ax.set_xlabel("Coverage", fontdict = font1)#x轴上的名字
ax.set_ylabel("Risk (MSE)", fontdict = font1)#y轴上的名字
#ax.set_title("the Influence of $T$ in Blend-Var.", fontsize = 15)
	# place the legend
	# in the upper-right corner
ax.legend(loc = 'upper right', fontsize = 15)
#plt.savefig('./Figure/TC_BlendVar_T.eps')
#plt.savefig('./Figure/TC_BlendVar_T.png')
#plt.show()
if __name__ == '__main__':
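	# Generate and save each figure in turn; plt.show() blocks until the window is closed.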
#draw_dy_sample()
TC_draw_dy_sample()
row = 1
col = 1
fig = plt.figure(num=1, figsize=(5.5,5))
TC_diffT(row, col, 1, fig)
	fig.tight_layout() # trim surrounding whitespace
plt.savefig('./Figure/TC_BlendVar_Ttmp.eps')
plt.savefig('./Figure/TC_BlendVar_Ttmp.png')
plt.show()
row = 1
col = 1
fig = plt.figure(num=1, figsize=(5.5,5))
AGE_diffT(row, col, 1, fig)
	fig.tight_layout() # trim surrounding whitespace
plt.savefig('./Figure/AGE_MCdropout_Ttmp.eps')
plt.savefig('./Figure/AGE_MCdropout_Ttmp.png')
plt.show()
row = 1
col = 1
fig = plt.figure(num=1, figsize=(5.5,5))
AGE_testmse_testb_idealmse(row, col, 1, fig)
	fig.tight_layout() # trim surrounding whitespace
plt.savefig('./Figure/AGE_testmse_testb_idealmsetmp.eps')
plt.savefig('./Figure/AGE_testmse_testb_idealmsetmp.png')
plt.show()
row = 1
col = 1
fig = plt.figure(num=1, figsize=(5.5,5))
TC_testmse_testb_idealmse(row, col, 1, fig)
	fig.tight_layout() # trim surrounding whitespace
plt.savefig('./Figure/TC_testmse_testb_idealmsetmp.eps')
plt.savefig('./Figure/TC_testmse_testb_idealmsetmp.png')
plt.show()
|
[
"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef draw_dy_sample():\r\n\t# 画 f(x)-y VS sample的图\r\n\tdata = np.load(\"age_T20.npy\").astype(\"float32\") # var dy mae\r\n\tdy = data[:, 1]\r\n\r\n\tID = np.arange(1, data.shape[0]+1)\r\n\r\n\t#figsize = 8,6 # 800 * 600 像素\r\n\tfigsize = 7,5 # 800 * 600 像素\r\n\tfigure, ax = plt.subplots(figsize=figsize)\r\n\t#fig = plt.gcf()\r\n\t#fig.set_size_inches(6.4, 4.0)\r\n\r\n\tplt.xlim(-10, 1080)\r\n\tplt.ylim(-30, 42)\r\n\r\n\tplt.scatter(ID,dy,s=10, c='black')\r\n\t# 参数 s:设置散点大小\r\n\t# 参数 c:设置散点颜色;常用的'r','b','g','w'...\r\n\t# 参数 marker: 设置散点形状;常用的'+', 'o','x'...\r\n\r\n\t#设置坐标刻度值的大小以及刻度值的字体\r\n\tplt.tick_params(labelsize=15)\r\n\r\n\t#设置坐标显示\r\n\t#new_ticks = np.linspace(0,1200,5) #plt.xticks(new_ticks)\r\n\t#在对应坐标处更换名称\r\n\tplt.xticks([0, 250, 500, 750, 1000])\r\n\tplt.yticks([-20, 0, 20, 40])\r\n\r\n\r\n\tfont1={'weight':'semibold',\r\n \t'size':20\r\n\t}\r\n\t#styles=['normal','italic','oblique']\r\n #weights=['light','normal','medium','semibold','bold','heavy','black']\r\n\r\n\tplt.xlabel(\"Sample\", fontdict = font1, horizontalalignment='center' )#x轴上的名字\r\n\tplt.ylabel(\"f(x)-y\", fontdict = font1, verticalalignment ='center')#y轴上的名字\r\n\tplt.tight_layout() #调整整体空白\r\n\tplt.savefig('./Figure/age_dy_sampletmp.eps')\r\n\tplt.savefig('./Figure/age_dy_sampletmp.png')\r\n\tplt.show()\r\n\r\n\r\ndef TC_draw_dy_sample():\r\n\t# 画 f(x)-y VS sample的图\r\n\tdata = np.load(\"../TC/Var2015-2017/10/var-dy-random_9_30/10_val.npy\").astype(\"float32\") # var dy mae t b\r\n\tdy = data[:, 1]\r\n\tID = np.arange(1, data.shape[0]+1)\r\n\t#figsize = 8,6 # 800 * 600 像素\r\n\tfigsize = 7,5 # 800 * 600 像素\r\n\tfigure, ax = plt.subplots(figsize=figsize)\r\n\t#fig = plt.gcf()\r\n\t#fig.set_size_inches(6.4, 4.0)\r\n\r\n\t#plt.xlim(-10, 5600)\r\n\t#plt.ylim(-75, 50)\r\n\r\n\tplt.scatter(ID,dy,s=10, c='black')\r\n\t# 参数 s:设置散点大小\r\n\t# 参数 c:设置散点颜色;常用的'r','b','g','w'...\r\n\t# 参数 marker: 设置散点形状;常用的'+', 'o','x'...\r\n\r\n\t#设置坐标刻度值的大小以及刻度值的字体\r\n\tplt.tick_params(labelsize=15)\r\n\r\n\t#设置坐标显示\r\n\t#new_ticks = np.linspace(0,1200,5) #plt.xticks(new_ticks)\r\n\t#在对应坐标处更换名称\r\n\t#plt.xticks([0, 1000, 2000, 3000, 4000, 5000])\r\n\t#plt.yticks([-50, -25, 0, 25, 50])\r\n\r\n\r\n\tfont1={'weight':'semibold',\r\n \t'size':20\r\n\t}\r\n\t#styles=['normal','italic','oblique']\r\n #weights=['light','normal','medium','semibold','bold','heavy','black']\r\n\r\n\tplt.xlabel(\"Sample\", fontdict = font1, horizontalalignment='center' )#x轴上的名字\r\n\tplt.ylabel(\"f(x)-y\", fontdict = font1, verticalalignment ='center')#y轴上的名字\r\n\tplt.tight_layout() #调整整体空白\r\n\tplt.savefig('./Figure/TC_dy_sampletmp.eps')\r\n\tplt.savefig('./Figure/TC_dy_sampletmp.png')\r\n\tplt.show()\r\n\r\ndef TC_testmse_testb_idealmse(row, col, num, fig):\r\n\t# 画 f(x)-y VS sample的图\r\n\tdata = np.load(\"../TC/testmse_testb_idealmse.npy\").astype(\"float32\") # ID test_mse test_b, ideal_mse\r\n\t#ID = data[:,0]\r\n\tsumnum = data.shape[0]\r\n\tID = np.linspace(0.0020, 0.9999, 20)\r\n\tprint(ID.shape)\r\n\tprint(ID*sumnum)\r\n\ttest_mse = data[(ID*sumnum).astype(int), 1]\r\n\ttest_b = data[(ID*sumnum).astype(int), 2]\r\n\tideal_mse = data[(ID*sumnum).astype(int),3]\r\n\tfigsize = 8,6 # 800 * 600 像素\r\n\tax = fig.add_subplot(row, col, num)#, figsize=figsize)\r\n\t#fig = plt.gcf()\r\n\t#fig.set_size_inches(6.4, 4.0)\r\n\r\n\t#plt.xlim(-10, 5600)\r\n\t#plt.ylim(-75, 50)\r\n\tax.plot(ID, test_b, linewidth=2, c='black', marker='*', markersize=8, label=r'Blend-Var $r^\\star$')\r\n\tax.plot(ID, test_mse, 
linewidth=2, c='b', marker='^', markersize=8, label=r'Blend-Var $\\hat{r}$')\r\n\tax.plot(ID, ideal_mse, linewidth=2, c='r', marker='.', markersize=8, label=r'Ideal $\\hat{r}$')\r\n\r\n\r\n\tax.axhline(y=data[int(ID[19]*sumnum), 1], ls=\"--\",c=\"grey\", linewidth=1)#添加水平直线\r\n\t#设置坐标刻度值的大小以及刻度值的字体\r\n\tax.tick_params(labelsize=15)\r\n\r\n\tfont1={'weight':'semibold',\r\n \t'size':20\r\n\t}\r\n\t#styles=['normal','italic','oblique']\r\n #weights=['light','normal','medium','semibold','bold','heavy','black']\r\n\tax.set_xlabel(\"Coverage\", fontdict = font1 )#x轴上的名字\r\n\tax.set_ylabel(\"Risk (MSE)\", fontdict = font1)#y轴上的名字\r\n\t#ax.set_title(\"Risk-Coverage Curve on the Validation Set\\n for TC Intensity Estimation.\", fontsize = 15)\r\n\t#简单的设置legend(设置位置)\r\n\t#位置在右上角\r\n\tax.legend(loc = 'upper right', fontsize = 15)\r\n\t#plt.savefig('./Figure/TC_testmse_testb_idealmse.eps')\r\n\t#plt.savefig('./Figure/TC_testmse_testb_idealmse.png')\r\n\t#plt.show()\r\n\r\n\r\ndef AGE_testmse_testb_idealmse(row, col, num, fig):\r\n\t# 画 f(x)-y VS sample的图\r\n\tdata = np.loadtxt(\"./val_mae_val_b_ideal_mae/valmae_valb_idealmae.csv\", delimiter=\",\", ) # ID val_mae val_b, ideal_mae\r\n\t#ID = data[:,0]\r\n\tprint(data.shape)\r\n\tsumnum = data.shape[0]\r\n\tID = np.linspace(0.0005, 0.9999, 20)\r\n\tval_mae = data[(ID*sumnum).astype(int), 1]\r\n\tval_b = data[(ID*sumnum).astype(int), 2]\r\n\tideal_mae = data[(ID*sumnum).astype(int),3]\r\n\tfigsize = 8,6 # 800 * 600 像素\r\n\tax = fig.add_subplot(row, col, num)#, figsize=figsize)\r\n\t#fig = plt.gcf()\r\n\t#fig.set_size_inches(6.4, 4.0)\r\n\r\n\t#plt.xlim(-10, 5600)\r\n\t#plt.ylim(-75, 50)\r\n\tax.plot(ID, val_b, linewidth=2, c='black', marker='*', markersize=8, label=r'MC-dropout $r^\\star$')\r\n\tax.plot(ID, val_mae, linewidth=2, c='b', marker='^', markersize=8, label=r'MC-dropout $\\hat{r}$')\r\n\tax.plot(ID, ideal_mae, linewidth=2, c='r', marker='.', markersize=8, label=r'Ideal $\\hat{r}$')\r\n\r\n\tax.axhline(y=data[int(sumnum)-1,1], ls=\"--\",c=\"grey\", linewidth=1)#添加水平直线\r\n\t#设置坐标刻度值的大小以及刻度值的字体\r\n\tax.tick_params(labelsize=15)\r\n\r\n\tfont1={'weight':'semibold',\r\n \t'size':20\r\n\t}\r\n\t#styles=['normal','italic','oblique']\r\n #weights=['light','normal','medium','semibold','bold','heavy','black']\r\n\tax.set_xlabel(\"Coverage\", fontdict = font1 )#x轴上的名字\r\n\tax.set_ylabel(\"Risk (MAE)\", fontdict = font1)#y轴上的名字\r\n\t#ax.set_title(\"Risk-Coverage Curve on the Validation Set\\n for Apparent Age Estimation.\", fontsize = 15)\r\n\t#简单的设置legend(设置位置)\r\n\t#位置在右上角\r\n\tax.legend(loc = 'lower right', fontsize = 15)\r\n\t#plt.savefig('./Figure/AGE_testmse_testb_idealmse.eps')\r\n\t#plt.savefig('./Figure/AGE_testmse_testb_idealmse.png')\r\n\t#plt.show()\r\n\r\n\r\ndef AGE_diffT(row, col, num, fig):\r\n\t# 画 f(x)-y VS sample的图\r\n\tdata = np.load(\"./diffT_mae.npy\").astype(\"float32\") # ID 02 04 08 10 15 20\r\n\tID = data[:,0]\r\n\tsumnum = data.shape[0]\r\n\tID = np.linspace(0.0020, 0.9999, 20)\r\n\tprint(ID.shape)\r\n\tprint(ID*sumnum)\r\n\tmae_02 = data[(ID*sumnum).astype(int), 1]\r\n\tmae_04 = data[(ID*sumnum).astype(int), 2]\r\n\tmae_08 = data[(ID*sumnum).astype(int), 3]\r\n\tmae_10 = data[(ID*sumnum).astype(int), 4]\r\n\tmae_15 = data[(ID*sumnum).astype(int), 5]\r\n\tmae_20 = data[(ID*sumnum).astype(int), 6]\r\n\r\n\tfigsize = 8,6 # 800 * 600 像素\r\n\tax = fig.add_subplot(row, col, num)#, figsize=figsize)\r\n\t#fig = plt.gcf()\r\n\t#fig.set_size_inches(6.4, 4.0)\r\n\r\n\t#plt.xlim(-10, 5600)\r\n\t#plt.ylim(-75, 50)\r\n\tax.plot(ID, mae_02, 
linewidth=2, marker='.', markersize=8, label='MAE_02')\r\n\tax.plot(ID, mae_04, linewidth=2, marker='*', markersize=8, label='MAE_04')\r\n\tax.plot(ID, mae_08, linewidth=2, marker='x', markersize=8, label='MAE_08')\r\n\tax.plot(ID, mae_10, linewidth=2, marker='^', markersize=8, label='MAE_10')\r\n\t#plt.plot(ID, mae_15, linewidth=2, marker='.', markersize=8, label='MAE_15')\r\n\tax.plot(ID, mae_20, linewidth=2, marker='o', markersize=8, label='MAE_20')\r\n\r\n\tax.axhline(y=data[int(ID[19]*sumnum), 1], ls=\"--\",c=\"grey\", linewidth=1)#添加水平直线\r\n\t#设置坐标刻度值的大小以及刻度值的字体\r\n\tax.tick_params(labelsize=15)\r\n\r\n\tfont1={'weight':'semibold',\r\n \t'size':20\r\n\t}\r\n\t#styles=['normal','italic','oblique']\r\n #weights=['light','normal','medium','semibold','bold','heavy','black']\r\n\tax.set_xlabel(\"Coverage\", fontdict = font1)#x轴上的名字\r\n\tax.set_ylabel(\"Risk (MAE)\", fontdict = font1)#y轴上的名字\r\n\t#ax.set_title(\"the Influence of $T$ in MC-dropout.\", fontsize = 15)\r\n\t#简单的设置legend(设置位置)\r\n\t#位置在右上角\r\n\tax.legend(loc = 'lower right', fontsize = 15)\r\n\t#plt.savefig('./Figure/AGE_MCdropout_T.eps')\r\n\t#plt.savefig('./Figure/AGE_MCdropout_T.png')\r\n\t#plt.show()\r\n\r\n\r\ndef TC_diffT(row, col, num, fig):\r\n\t# 画 f(x)-y VS sample的图\r\n\tdata = np.load(\"../TC/diffT_mse.npy\").astype(\"float32\") # ID 02 04 08 10 12 15\r\n\tID = data[:,0]\r\n\tsumnum = data.shape[0]\r\n\tID = np.linspace(0.0020, 0.9999, 20)\r\n\tprint(ID.shape)\r\n\tprint(ID*sumnum)\r\n\tmse_02 = data[(ID*sumnum).astype(int), 1]\r\n\tmse_04 = data[(ID*sumnum).astype(int), 2]\r\n\tmse_08 = data[(ID*sumnum).astype(int), 3]\r\n\tmse_10 = data[(ID*sumnum).astype(int), 4]\r\n\tmse_12 = data[(ID*sumnum).astype(int), 5]\r\n\tmse_15 = data[(ID*sumnum).astype(int), 6]\r\n\r\n\tfigsize = 8,6 # 800 * 600 像素\r\n\tax = fig.add_subplot(row, col, num)#, figsize=figsize)\r\n\t#fig = plt.gcf()\r\n\t#fig.set_size_inches(6.4, 4.0)\r\n\r\n\t#plt.xlim(-10, 5600)\r\n\t#plt.ylim(-75, 50)\r\n\tax.plot(ID, mse_02, linewidth=2, marker='.', markersize=8, label='MSE_02')\r\n\tax.plot(ID, mse_04, linewidth=2, marker='*', markersize=8, label='MSE_04')\r\n\tax.plot(ID, mse_08, linewidth=2, marker='x', markersize=8, label='MSE_08')\r\n\tax.plot(ID, mse_10, linewidth=2, marker='^', markersize=8, label='MSE_10')\r\n\t#plt.plot(ID, mse_12, linewidth=2, marker='.', markersize=8, label='MSE_12')\r\n\tax.plot(ID, mse_15, linewidth=2, marker='o', markersize=8, label='MSE_15')\r\n\r\n\tax.axhline(y=data[int(ID[19]*sumnum), 1], ls=\"--\",c=\"grey\", linewidth=1)#添加水平直线\r\n\t#设置坐标刻度值的大小以及刻度值的字体\r\n\tax.tick_params(labelsize=15)\r\n\r\n\tfont1={'weight':'semibold',\r\n \t'size':20\r\n\t}\r\n\t#styles=['normal','italic','oblique']\r\n #weights=['light','normal','medium','semibold','bold','heavy','black']\r\n\tax.set_xlabel(\"Coverage\", fontdict = font1)#x轴上的名字\r\n\tax.set_ylabel(\"Risk (MSE)\", fontdict = font1)#y轴上的名字\r\n\t#ax.set_title(\"the Influence of $T$ in Blend-Var.\", fontsize = 15)\r\n\t#简单的设置legend(设置位置)\r\n\t#位置在右上角\r\n\tax.legend(loc = 'upper right', fontsize = 15)\r\n\t#plt.savefig('./Figure/TC_BlendVar_T.eps')\r\n\t#plt.savefig('./Figure/TC_BlendVar_T.png')\r\n\t#plt.show()\r\nif __name__ == '__main__':\r\n\t#draw_dy_sample()\r\n\tTC_draw_dy_sample()\r\n\t\r\n\trow = 1\r\n\tcol = 1\r\n\tfig = plt.figure(num=1, figsize=(5.5,5))\r\n\tTC_diffT(row, col, 1, fig)\r\n\tfig.tight_layout() #调整整体空白\r\n\tplt.savefig('./Figure/TC_BlendVar_Ttmp.eps')\r\n\tplt.savefig('./Figure/TC_BlendVar_Ttmp.png')\r\n\tplt.show()\r\n\t\r\n\trow = 1\r\n\tcol = 1\r\n\tfig = 
plt.figure(num=1, figsize=(5.5,5))\r\n\tAGE_diffT(row, col, 1, fig)\r\n\tfig.tight_layout() #调整整体空白\r\n\tplt.savefig('./Figure/AGE_MCdropout_Ttmp.eps')\r\n\tplt.savefig('./Figure/AGE_MCdropout_Ttmp.png')\r\n\tplt.show()\r\n\t\r\n\trow = 1\r\n\tcol = 1\r\n\tfig = plt.figure(num=1, figsize=(5.5,5))\r\n\tAGE_testmse_testb_idealmse(row, col, 1, fig)\r\n\tfig.tight_layout() #调整整体空白\r\n\tplt.savefig('./Figure/AGE_testmse_testb_idealmsetmp.eps')\r\n\tplt.savefig('./Figure/AGE_testmse_testb_idealmsetmp.png')\r\n\tplt.show()\r\n\t\r\n\trow = 1\r\n\tcol = 1\r\n\tfig = plt.figure(num=1, figsize=(5.5,5))\r\n\tTC_testmse_testb_idealmse(row, col, 1, fig)\r\n\tfig.tight_layout() #调整整体空白\r\n\tplt.savefig('./Figure/TC_testmse_testb_idealmsetmp.eps')\r\n\tplt.savefig('./Figure/TC_testmse_testb_idealmsetmp.png')\r\n\tplt.show()\r\n\t",
"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef draw_dy_sample():\n data = np.load('age_T20.npy').astype('float32')\n dy = data[:, 1]\n ID = np.arange(1, data.shape[0] + 1)\n figsize = 7, 5\n figure, ax = plt.subplots(figsize=figsize)\n plt.xlim(-10, 1080)\n plt.ylim(-30, 42)\n plt.scatter(ID, dy, s=10, c='black')\n plt.tick_params(labelsize=15)\n plt.xticks([0, 250, 500, 750, 1000])\n plt.yticks([-20, 0, 20, 40])\n font1 = {'weight': 'semibold', 'size': 20}\n plt.xlabel('Sample', fontdict=font1, horizontalalignment='center')\n plt.ylabel('f(x)-y', fontdict=font1, verticalalignment='center')\n plt.tight_layout()\n plt.savefig('./Figure/age_dy_sampletmp.eps')\n plt.savefig('./Figure/age_dy_sampletmp.png')\n plt.show()\n\n\ndef TC_draw_dy_sample():\n data = np.load('../TC/Var2015-2017/10/var-dy-random_9_30/10_val.npy'\n ).astype('float32')\n dy = data[:, 1]\n ID = np.arange(1, data.shape[0] + 1)\n figsize = 7, 5\n figure, ax = plt.subplots(figsize=figsize)\n plt.scatter(ID, dy, s=10, c='black')\n plt.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n plt.xlabel('Sample', fontdict=font1, horizontalalignment='center')\n plt.ylabel('f(x)-y', fontdict=font1, verticalalignment='center')\n plt.tight_layout()\n plt.savefig('./Figure/TC_dy_sampletmp.eps')\n plt.savefig('./Figure/TC_dy_sampletmp.png')\n plt.show()\n\n\ndef TC_testmse_testb_idealmse(row, col, num, fig):\n data = np.load('../TC/testmse_testb_idealmse.npy').astype('float32')\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n test_mse = data[(ID * sumnum).astype(int), 1]\n test_b = data[(ID * sumnum).astype(int), 2]\n ideal_mse = data[(ID * sumnum).astype(int), 3]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, test_b, linewidth=2, c='black', marker='*', markersize=8,\n label='Blend-Var $r^\\\\star$')\n ax.plot(ID, test_mse, linewidth=2, c='b', marker='^', markersize=8,\n label='Blend-Var $\\\\hat{r}$')\n ax.plot(ID, ideal_mse, linewidth=2, c='r', marker='.', markersize=8,\n label='Ideal $\\\\hat{r}$')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MSE)', fontdict=font1)\n ax.legend(loc='upper right', fontsize=15)\n\n\ndef AGE_testmse_testb_idealmse(row, col, num, fig):\n data = np.loadtxt('./val_mae_val_b_ideal_mae/valmae_valb_idealmae.csv',\n delimiter=',')\n print(data.shape)\n sumnum = data.shape[0]\n ID = np.linspace(0.0005, 0.9999, 20)\n val_mae = data[(ID * sumnum).astype(int), 1]\n val_b = data[(ID * sumnum).astype(int), 2]\n ideal_mae = data[(ID * sumnum).astype(int), 3]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, val_b, linewidth=2, c='black', marker='*', markersize=8,\n label='MC-dropout $r^\\\\star$')\n ax.plot(ID, val_mae, linewidth=2, c='b', marker='^', markersize=8,\n label='MC-dropout $\\\\hat{r}$')\n ax.plot(ID, ideal_mae, linewidth=2, c='r', marker='.', markersize=8,\n label='Ideal $\\\\hat{r}$')\n ax.axhline(y=data[int(sumnum) - 1, 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\ndef AGE_diffT(row, col, num, fig):\n data = np.load('./diffT_mae.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = 
np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mae_02 = data[(ID * sumnum).astype(int), 1]\n mae_04 = data[(ID * sumnum).astype(int), 2]\n mae_08 = data[(ID * sumnum).astype(int), 3]\n mae_10 = data[(ID * sumnum).astype(int), 4]\n mae_15 = data[(ID * sumnum).astype(int), 5]\n mae_20 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mae_02, linewidth=2, marker='.', markersize=8, label='MAE_02')\n ax.plot(ID, mae_04, linewidth=2, marker='*', markersize=8, label='MAE_04')\n ax.plot(ID, mae_08, linewidth=2, marker='x', markersize=8, label='MAE_08')\n ax.plot(ID, mae_10, linewidth=2, marker='^', markersize=8, label='MAE_10')\n ax.plot(ID, mae_20, linewidth=2, marker='o', markersize=8, label='MAE_20')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\ndef TC_diffT(row, col, num, fig):\n data = np.load('../TC/diffT_mse.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mse_02 = data[(ID * sumnum).astype(int), 1]\n mse_04 = data[(ID * sumnum).astype(int), 2]\n mse_08 = data[(ID * sumnum).astype(int), 3]\n mse_10 = data[(ID * sumnum).astype(int), 4]\n mse_12 = data[(ID * sumnum).astype(int), 5]\n mse_15 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mse_02, linewidth=2, marker='.', markersize=8, label='MSE_02')\n ax.plot(ID, mse_04, linewidth=2, marker='*', markersize=8, label='MSE_04')\n ax.plot(ID, mse_08, linewidth=2, marker='x', markersize=8, label='MSE_08')\n ax.plot(ID, mse_10, linewidth=2, marker='^', markersize=8, label='MSE_10')\n ax.plot(ID, mse_15, linewidth=2, marker='o', markersize=8, label='MSE_15')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MSE)', fontdict=font1)\n ax.legend(loc='upper right', fontsize=15)\n\n\nif __name__ == '__main__':\n TC_draw_dy_sample()\n row = 1\n col = 1\n fig = plt.figure(num=1, figsize=(5.5, 5))\n TC_diffT(row, col, 1, fig)\n fig.tight_layout()\n plt.savefig('./Figure/TC_BlendVar_Ttmp.eps')\n plt.savefig('./Figure/TC_BlendVar_Ttmp.png')\n plt.show()\n row = 1\n col = 1\n fig = plt.figure(num=1, figsize=(5.5, 5))\n AGE_diffT(row, col, 1, fig)\n fig.tight_layout()\n plt.savefig('./Figure/AGE_MCdropout_Ttmp.eps')\n plt.savefig('./Figure/AGE_MCdropout_Ttmp.png')\n plt.show()\n row = 1\n col = 1\n fig = plt.figure(num=1, figsize=(5.5, 5))\n AGE_testmse_testb_idealmse(row, col, 1, fig)\n fig.tight_layout()\n plt.savefig('./Figure/AGE_testmse_testb_idealmsetmp.eps')\n plt.savefig('./Figure/AGE_testmse_testb_idealmsetmp.png')\n plt.show()\n row = 1\n col = 1\n fig = plt.figure(num=1, figsize=(5.5, 5))\n TC_testmse_testb_idealmse(row, col, 1, fig)\n fig.tight_layout()\n plt.savefig('./Figure/TC_testmse_testb_idealmsetmp.eps')\n plt.savefig('./Figure/TC_testmse_testb_idealmsetmp.png')\n plt.show()\n",
"<import token>\n\n\ndef draw_dy_sample():\n data = np.load('age_T20.npy').astype('float32')\n dy = data[:, 1]\n ID = np.arange(1, data.shape[0] + 1)\n figsize = 7, 5\n figure, ax = plt.subplots(figsize=figsize)\n plt.xlim(-10, 1080)\n plt.ylim(-30, 42)\n plt.scatter(ID, dy, s=10, c='black')\n plt.tick_params(labelsize=15)\n plt.xticks([0, 250, 500, 750, 1000])\n plt.yticks([-20, 0, 20, 40])\n font1 = {'weight': 'semibold', 'size': 20}\n plt.xlabel('Sample', fontdict=font1, horizontalalignment='center')\n plt.ylabel('f(x)-y', fontdict=font1, verticalalignment='center')\n plt.tight_layout()\n plt.savefig('./Figure/age_dy_sampletmp.eps')\n plt.savefig('./Figure/age_dy_sampletmp.png')\n plt.show()\n\n\ndef TC_draw_dy_sample():\n data = np.load('../TC/Var2015-2017/10/var-dy-random_9_30/10_val.npy'\n ).astype('float32')\n dy = data[:, 1]\n ID = np.arange(1, data.shape[0] + 1)\n figsize = 7, 5\n figure, ax = plt.subplots(figsize=figsize)\n plt.scatter(ID, dy, s=10, c='black')\n plt.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n plt.xlabel('Sample', fontdict=font1, horizontalalignment='center')\n plt.ylabel('f(x)-y', fontdict=font1, verticalalignment='center')\n plt.tight_layout()\n plt.savefig('./Figure/TC_dy_sampletmp.eps')\n plt.savefig('./Figure/TC_dy_sampletmp.png')\n plt.show()\n\n\ndef TC_testmse_testb_idealmse(row, col, num, fig):\n data = np.load('../TC/testmse_testb_idealmse.npy').astype('float32')\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n test_mse = data[(ID * sumnum).astype(int), 1]\n test_b = data[(ID * sumnum).astype(int), 2]\n ideal_mse = data[(ID * sumnum).astype(int), 3]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, test_b, linewidth=2, c='black', marker='*', markersize=8,\n label='Blend-Var $r^\\\\star$')\n ax.plot(ID, test_mse, linewidth=2, c='b', marker='^', markersize=8,\n label='Blend-Var $\\\\hat{r}$')\n ax.plot(ID, ideal_mse, linewidth=2, c='r', marker='.', markersize=8,\n label='Ideal $\\\\hat{r}$')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MSE)', fontdict=font1)\n ax.legend(loc='upper right', fontsize=15)\n\n\ndef AGE_testmse_testb_idealmse(row, col, num, fig):\n data = np.loadtxt('./val_mae_val_b_ideal_mae/valmae_valb_idealmae.csv',\n delimiter=',')\n print(data.shape)\n sumnum = data.shape[0]\n ID = np.linspace(0.0005, 0.9999, 20)\n val_mae = data[(ID * sumnum).astype(int), 1]\n val_b = data[(ID * sumnum).astype(int), 2]\n ideal_mae = data[(ID * sumnum).astype(int), 3]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, val_b, linewidth=2, c='black', marker='*', markersize=8,\n label='MC-dropout $r^\\\\star$')\n ax.plot(ID, val_mae, linewidth=2, c='b', marker='^', markersize=8,\n label='MC-dropout $\\\\hat{r}$')\n ax.plot(ID, ideal_mae, linewidth=2, c='r', marker='.', markersize=8,\n label='Ideal $\\\\hat{r}$')\n ax.axhline(y=data[int(sumnum) - 1, 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\ndef AGE_diffT(row, col, num, fig):\n data = np.load('./diffT_mae.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n 
print(ID.shape)\n print(ID * sumnum)\n mae_02 = data[(ID * sumnum).astype(int), 1]\n mae_04 = data[(ID * sumnum).astype(int), 2]\n mae_08 = data[(ID * sumnum).astype(int), 3]\n mae_10 = data[(ID * sumnum).astype(int), 4]\n mae_15 = data[(ID * sumnum).astype(int), 5]\n mae_20 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mae_02, linewidth=2, marker='.', markersize=8, label='MAE_02')\n ax.plot(ID, mae_04, linewidth=2, marker='*', markersize=8, label='MAE_04')\n ax.plot(ID, mae_08, linewidth=2, marker='x', markersize=8, label='MAE_08')\n ax.plot(ID, mae_10, linewidth=2, marker='^', markersize=8, label='MAE_10')\n ax.plot(ID, mae_20, linewidth=2, marker='o', markersize=8, label='MAE_20')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\ndef TC_diffT(row, col, num, fig):\n data = np.load('../TC/diffT_mse.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mse_02 = data[(ID * sumnum).astype(int), 1]\n mse_04 = data[(ID * sumnum).astype(int), 2]\n mse_08 = data[(ID * sumnum).astype(int), 3]\n mse_10 = data[(ID * sumnum).astype(int), 4]\n mse_12 = data[(ID * sumnum).astype(int), 5]\n mse_15 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mse_02, linewidth=2, marker='.', markersize=8, label='MSE_02')\n ax.plot(ID, mse_04, linewidth=2, marker='*', markersize=8, label='MSE_04')\n ax.plot(ID, mse_08, linewidth=2, marker='x', markersize=8, label='MSE_08')\n ax.plot(ID, mse_10, linewidth=2, marker='^', markersize=8, label='MSE_10')\n ax.plot(ID, mse_15, linewidth=2, marker='o', markersize=8, label='MSE_15')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MSE)', fontdict=font1)\n ax.legend(loc='upper right', fontsize=15)\n\n\nif __name__ == '__main__':\n TC_draw_dy_sample()\n row = 1\n col = 1\n fig = plt.figure(num=1, figsize=(5.5, 5))\n TC_diffT(row, col, 1, fig)\n fig.tight_layout()\n plt.savefig('./Figure/TC_BlendVar_Ttmp.eps')\n plt.savefig('./Figure/TC_BlendVar_Ttmp.png')\n plt.show()\n row = 1\n col = 1\n fig = plt.figure(num=1, figsize=(5.5, 5))\n AGE_diffT(row, col, 1, fig)\n fig.tight_layout()\n plt.savefig('./Figure/AGE_MCdropout_Ttmp.eps')\n plt.savefig('./Figure/AGE_MCdropout_Ttmp.png')\n plt.show()\n row = 1\n col = 1\n fig = plt.figure(num=1, figsize=(5.5, 5))\n AGE_testmse_testb_idealmse(row, col, 1, fig)\n fig.tight_layout()\n plt.savefig('./Figure/AGE_testmse_testb_idealmsetmp.eps')\n plt.savefig('./Figure/AGE_testmse_testb_idealmsetmp.png')\n plt.show()\n row = 1\n col = 1\n fig = plt.figure(num=1, figsize=(5.5, 5))\n TC_testmse_testb_idealmse(row, col, 1, fig)\n fig.tight_layout()\n plt.savefig('./Figure/TC_testmse_testb_idealmsetmp.eps')\n plt.savefig('./Figure/TC_testmse_testb_idealmsetmp.png')\n plt.show()\n",
"<import token>\n\n\ndef draw_dy_sample():\n data = np.load('age_T20.npy').astype('float32')\n dy = data[:, 1]\n ID = np.arange(1, data.shape[0] + 1)\n figsize = 7, 5\n figure, ax = plt.subplots(figsize=figsize)\n plt.xlim(-10, 1080)\n plt.ylim(-30, 42)\n plt.scatter(ID, dy, s=10, c='black')\n plt.tick_params(labelsize=15)\n plt.xticks([0, 250, 500, 750, 1000])\n plt.yticks([-20, 0, 20, 40])\n font1 = {'weight': 'semibold', 'size': 20}\n plt.xlabel('Sample', fontdict=font1, horizontalalignment='center')\n plt.ylabel('f(x)-y', fontdict=font1, verticalalignment='center')\n plt.tight_layout()\n plt.savefig('./Figure/age_dy_sampletmp.eps')\n plt.savefig('./Figure/age_dy_sampletmp.png')\n plt.show()\n\n\ndef TC_draw_dy_sample():\n data = np.load('../TC/Var2015-2017/10/var-dy-random_9_30/10_val.npy'\n ).astype('float32')\n dy = data[:, 1]\n ID = np.arange(1, data.shape[0] + 1)\n figsize = 7, 5\n figure, ax = plt.subplots(figsize=figsize)\n plt.scatter(ID, dy, s=10, c='black')\n plt.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n plt.xlabel('Sample', fontdict=font1, horizontalalignment='center')\n plt.ylabel('f(x)-y', fontdict=font1, verticalalignment='center')\n plt.tight_layout()\n plt.savefig('./Figure/TC_dy_sampletmp.eps')\n plt.savefig('./Figure/TC_dy_sampletmp.png')\n plt.show()\n\n\ndef TC_testmse_testb_idealmse(row, col, num, fig):\n data = np.load('../TC/testmse_testb_idealmse.npy').astype('float32')\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n test_mse = data[(ID * sumnum).astype(int), 1]\n test_b = data[(ID * sumnum).astype(int), 2]\n ideal_mse = data[(ID * sumnum).astype(int), 3]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, test_b, linewidth=2, c='black', marker='*', markersize=8,\n label='Blend-Var $r^\\\\star$')\n ax.plot(ID, test_mse, linewidth=2, c='b', marker='^', markersize=8,\n label='Blend-Var $\\\\hat{r}$')\n ax.plot(ID, ideal_mse, linewidth=2, c='r', marker='.', markersize=8,\n label='Ideal $\\\\hat{r}$')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MSE)', fontdict=font1)\n ax.legend(loc='upper right', fontsize=15)\n\n\ndef AGE_testmse_testb_idealmse(row, col, num, fig):\n data = np.loadtxt('./val_mae_val_b_ideal_mae/valmae_valb_idealmae.csv',\n delimiter=',')\n print(data.shape)\n sumnum = data.shape[0]\n ID = np.linspace(0.0005, 0.9999, 20)\n val_mae = data[(ID * sumnum).astype(int), 1]\n val_b = data[(ID * sumnum).astype(int), 2]\n ideal_mae = data[(ID * sumnum).astype(int), 3]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, val_b, linewidth=2, c='black', marker='*', markersize=8,\n label='MC-dropout $r^\\\\star$')\n ax.plot(ID, val_mae, linewidth=2, c='b', marker='^', markersize=8,\n label='MC-dropout $\\\\hat{r}$')\n ax.plot(ID, ideal_mae, linewidth=2, c='r', marker='.', markersize=8,\n label='Ideal $\\\\hat{r}$')\n ax.axhline(y=data[int(sumnum) - 1, 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\ndef AGE_diffT(row, col, num, fig):\n data = np.load('./diffT_mae.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n 
print(ID.shape)\n print(ID * sumnum)\n mae_02 = data[(ID * sumnum).astype(int), 1]\n mae_04 = data[(ID * sumnum).astype(int), 2]\n mae_08 = data[(ID * sumnum).astype(int), 3]\n mae_10 = data[(ID * sumnum).astype(int), 4]\n mae_15 = data[(ID * sumnum).astype(int), 5]\n mae_20 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mae_02, linewidth=2, marker='.', markersize=8, label='MAE_02')\n ax.plot(ID, mae_04, linewidth=2, marker='*', markersize=8, label='MAE_04')\n ax.plot(ID, mae_08, linewidth=2, marker='x', markersize=8, label='MAE_08')\n ax.plot(ID, mae_10, linewidth=2, marker='^', markersize=8, label='MAE_10')\n ax.plot(ID, mae_20, linewidth=2, marker='o', markersize=8, label='MAE_20')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\ndef TC_diffT(row, col, num, fig):\n data = np.load('../TC/diffT_mse.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mse_02 = data[(ID * sumnum).astype(int), 1]\n mse_04 = data[(ID * sumnum).astype(int), 2]\n mse_08 = data[(ID * sumnum).astype(int), 3]\n mse_10 = data[(ID * sumnum).astype(int), 4]\n mse_12 = data[(ID * sumnum).astype(int), 5]\n mse_15 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mse_02, linewidth=2, marker='.', markersize=8, label='MSE_02')\n ax.plot(ID, mse_04, linewidth=2, marker='*', markersize=8, label='MSE_04')\n ax.plot(ID, mse_08, linewidth=2, marker='x', markersize=8, label='MSE_08')\n ax.plot(ID, mse_10, linewidth=2, marker='^', markersize=8, label='MSE_10')\n ax.plot(ID, mse_15, linewidth=2, marker='o', markersize=8, label='MSE_15')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MSE)', fontdict=font1)\n ax.legend(loc='upper right', fontsize=15)\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef TC_draw_dy_sample():\n data = np.load('../TC/Var2015-2017/10/var-dy-random_9_30/10_val.npy'\n ).astype('float32')\n dy = data[:, 1]\n ID = np.arange(1, data.shape[0] + 1)\n figsize = 7, 5\n figure, ax = plt.subplots(figsize=figsize)\n plt.scatter(ID, dy, s=10, c='black')\n plt.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n plt.xlabel('Sample', fontdict=font1, horizontalalignment='center')\n plt.ylabel('f(x)-y', fontdict=font1, verticalalignment='center')\n plt.tight_layout()\n plt.savefig('./Figure/TC_dy_sampletmp.eps')\n plt.savefig('./Figure/TC_dy_sampletmp.png')\n plt.show()\n\n\ndef TC_testmse_testb_idealmse(row, col, num, fig):\n data = np.load('../TC/testmse_testb_idealmse.npy').astype('float32')\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n test_mse = data[(ID * sumnum).astype(int), 1]\n test_b = data[(ID * sumnum).astype(int), 2]\n ideal_mse = data[(ID * sumnum).astype(int), 3]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, test_b, linewidth=2, c='black', marker='*', markersize=8,\n label='Blend-Var $r^\\\\star$')\n ax.plot(ID, test_mse, linewidth=2, c='b', marker='^', markersize=8,\n label='Blend-Var $\\\\hat{r}$')\n ax.plot(ID, ideal_mse, linewidth=2, c='r', marker='.', markersize=8,\n label='Ideal $\\\\hat{r}$')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MSE)', fontdict=font1)\n ax.legend(loc='upper right', fontsize=15)\n\n\ndef AGE_testmse_testb_idealmse(row, col, num, fig):\n data = np.loadtxt('./val_mae_val_b_ideal_mae/valmae_valb_idealmae.csv',\n delimiter=',')\n print(data.shape)\n sumnum = data.shape[0]\n ID = np.linspace(0.0005, 0.9999, 20)\n val_mae = data[(ID * sumnum).astype(int), 1]\n val_b = data[(ID * sumnum).astype(int), 2]\n ideal_mae = data[(ID * sumnum).astype(int), 3]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, val_b, linewidth=2, c='black', marker='*', markersize=8,\n label='MC-dropout $r^\\\\star$')\n ax.plot(ID, val_mae, linewidth=2, c='b', marker='^', markersize=8,\n label='MC-dropout $\\\\hat{r}$')\n ax.plot(ID, ideal_mae, linewidth=2, c='r', marker='.', markersize=8,\n label='Ideal $\\\\hat{r}$')\n ax.axhline(y=data[int(sumnum) - 1, 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\ndef AGE_diffT(row, col, num, fig):\n data = np.load('./diffT_mae.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mae_02 = data[(ID * sumnum).astype(int), 1]\n mae_04 = data[(ID * sumnum).astype(int), 2]\n mae_08 = data[(ID * sumnum).astype(int), 3]\n mae_10 = data[(ID * sumnum).astype(int), 4]\n mae_15 = data[(ID * sumnum).astype(int), 5]\n mae_20 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mae_02, linewidth=2, marker='.', markersize=8, label='MAE_02')\n ax.plot(ID, mae_04, linewidth=2, marker='*', markersize=8, label='MAE_04')\n ax.plot(ID, mae_08, linewidth=2, marker='x', markersize=8, label='MAE_08')\n ax.plot(ID, mae_10, linewidth=2, marker='^', markersize=8, 
label='MAE_10')\n ax.plot(ID, mae_20, linewidth=2, marker='o', markersize=8, label='MAE_20')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\ndef TC_diffT(row, col, num, fig):\n data = np.load('../TC/diffT_mse.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mse_02 = data[(ID * sumnum).astype(int), 1]\n mse_04 = data[(ID * sumnum).astype(int), 2]\n mse_08 = data[(ID * sumnum).astype(int), 3]\n mse_10 = data[(ID * sumnum).astype(int), 4]\n mse_12 = data[(ID * sumnum).astype(int), 5]\n mse_15 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mse_02, linewidth=2, marker='.', markersize=8, label='MSE_02')\n ax.plot(ID, mse_04, linewidth=2, marker='*', markersize=8, label='MSE_04')\n ax.plot(ID, mse_08, linewidth=2, marker='x', markersize=8, label='MSE_08')\n ax.plot(ID, mse_10, linewidth=2, marker='^', markersize=8, label='MSE_10')\n ax.plot(ID, mse_15, linewidth=2, marker='o', markersize=8, label='MSE_15')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MSE)', fontdict=font1)\n ax.legend(loc='upper right', fontsize=15)\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef TC_draw_dy_sample():\n data = np.load('../TC/Var2015-2017/10/var-dy-random_9_30/10_val.npy'\n ).astype('float32')\n dy = data[:, 1]\n ID = np.arange(1, data.shape[0] + 1)\n figsize = 7, 5\n figure, ax = plt.subplots(figsize=figsize)\n plt.scatter(ID, dy, s=10, c='black')\n plt.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n plt.xlabel('Sample', fontdict=font1, horizontalalignment='center')\n plt.ylabel('f(x)-y', fontdict=font1, verticalalignment='center')\n plt.tight_layout()\n plt.savefig('./Figure/TC_dy_sampletmp.eps')\n plt.savefig('./Figure/TC_dy_sampletmp.png')\n plt.show()\n\n\n<function token>\n\n\ndef AGE_testmse_testb_idealmse(row, col, num, fig):\n data = np.loadtxt('./val_mae_val_b_ideal_mae/valmae_valb_idealmae.csv',\n delimiter=',')\n print(data.shape)\n sumnum = data.shape[0]\n ID = np.linspace(0.0005, 0.9999, 20)\n val_mae = data[(ID * sumnum).astype(int), 1]\n val_b = data[(ID * sumnum).astype(int), 2]\n ideal_mae = data[(ID * sumnum).astype(int), 3]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, val_b, linewidth=2, c='black', marker='*', markersize=8,\n label='MC-dropout $r^\\\\star$')\n ax.plot(ID, val_mae, linewidth=2, c='b', marker='^', markersize=8,\n label='MC-dropout $\\\\hat{r}$')\n ax.plot(ID, ideal_mae, linewidth=2, c='r', marker='.', markersize=8,\n label='Ideal $\\\\hat{r}$')\n ax.axhline(y=data[int(sumnum) - 1, 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\ndef AGE_diffT(row, col, num, fig):\n data = np.load('./diffT_mae.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mae_02 = data[(ID * sumnum).astype(int), 1]\n mae_04 = data[(ID * sumnum).astype(int), 2]\n mae_08 = data[(ID * sumnum).astype(int), 3]\n mae_10 = data[(ID * sumnum).astype(int), 4]\n mae_15 = data[(ID * sumnum).astype(int), 5]\n mae_20 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mae_02, linewidth=2, marker='.', markersize=8, label='MAE_02')\n ax.plot(ID, mae_04, linewidth=2, marker='*', markersize=8, label='MAE_04')\n ax.plot(ID, mae_08, linewidth=2, marker='x', markersize=8, label='MAE_08')\n ax.plot(ID, mae_10, linewidth=2, marker='^', markersize=8, label='MAE_10')\n ax.plot(ID, mae_20, linewidth=2, marker='o', markersize=8, label='MAE_20')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\ndef TC_diffT(row, col, num, fig):\n data = np.load('../TC/diffT_mse.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mse_02 = data[(ID * sumnum).astype(int), 1]\n mse_04 = data[(ID * sumnum).astype(int), 2]\n mse_08 = data[(ID * sumnum).astype(int), 3]\n mse_10 = data[(ID * sumnum).astype(int), 4]\n mse_12 = data[(ID * sumnum).astype(int), 5]\n mse_15 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mse_02, linewidth=2, marker='.', markersize=8, label='MSE_02')\n ax.plot(ID, 
mse_04, linewidth=2, marker='*', markersize=8, label='MSE_04')\n ax.plot(ID, mse_08, linewidth=2, marker='x', markersize=8, label='MSE_08')\n ax.plot(ID, mse_10, linewidth=2, marker='^', markersize=8, label='MSE_10')\n ax.plot(ID, mse_15, linewidth=2, marker='o', markersize=8, label='MSE_15')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MSE)', fontdict=font1)\n ax.legend(loc='upper right', fontsize=15)\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef TC_draw_dy_sample():\n data = np.load('../TC/Var2015-2017/10/var-dy-random_9_30/10_val.npy'\n ).astype('float32')\n dy = data[:, 1]\n ID = np.arange(1, data.shape[0] + 1)\n figsize = 7, 5\n figure, ax = plt.subplots(figsize=figsize)\n plt.scatter(ID, dy, s=10, c='black')\n plt.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n plt.xlabel('Sample', fontdict=font1, horizontalalignment='center')\n plt.ylabel('f(x)-y', fontdict=font1, verticalalignment='center')\n plt.tight_layout()\n plt.savefig('./Figure/TC_dy_sampletmp.eps')\n plt.savefig('./Figure/TC_dy_sampletmp.png')\n plt.show()\n\n\n<function token>\n<function token>\n\n\ndef AGE_diffT(row, col, num, fig):\n data = np.load('./diffT_mae.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mae_02 = data[(ID * sumnum).astype(int), 1]\n mae_04 = data[(ID * sumnum).astype(int), 2]\n mae_08 = data[(ID * sumnum).astype(int), 3]\n mae_10 = data[(ID * sumnum).astype(int), 4]\n mae_15 = data[(ID * sumnum).astype(int), 5]\n mae_20 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mae_02, linewidth=2, marker='.', markersize=8, label='MAE_02')\n ax.plot(ID, mae_04, linewidth=2, marker='*', markersize=8, label='MAE_04')\n ax.plot(ID, mae_08, linewidth=2, marker='x', markersize=8, label='MAE_08')\n ax.plot(ID, mae_10, linewidth=2, marker='^', markersize=8, label='MAE_10')\n ax.plot(ID, mae_20, linewidth=2, marker='o', markersize=8, label='MAE_20')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\ndef TC_diffT(row, col, num, fig):\n data = np.load('../TC/diffT_mse.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mse_02 = data[(ID * sumnum).astype(int), 1]\n mse_04 = data[(ID * sumnum).astype(int), 2]\n mse_08 = data[(ID * sumnum).astype(int), 3]\n mse_10 = data[(ID * sumnum).astype(int), 4]\n mse_12 = data[(ID * sumnum).astype(int), 5]\n mse_15 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mse_02, linewidth=2, marker='.', markersize=8, label='MSE_02')\n ax.plot(ID, mse_04, linewidth=2, marker='*', markersize=8, label='MSE_04')\n ax.plot(ID, mse_08, linewidth=2, marker='x', markersize=8, label='MSE_08')\n ax.plot(ID, mse_10, linewidth=2, marker='^', markersize=8, label='MSE_10')\n ax.plot(ID, mse_15, linewidth=2, marker='o', markersize=8, label='MSE_15')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MSE)', fontdict=font1)\n ax.legend(loc='upper right', fontsize=15)\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef TC_draw_dy_sample():\n data = np.load('../TC/Var2015-2017/10/var-dy-random_9_30/10_val.npy'\n ).astype('float32')\n dy = data[:, 1]\n ID = np.arange(1, data.shape[0] + 1)\n figsize = 7, 5\n figure, ax = plt.subplots(figsize=figsize)\n plt.scatter(ID, dy, s=10, c='black')\n plt.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n plt.xlabel('Sample', fontdict=font1, horizontalalignment='center')\n plt.ylabel('f(x)-y', fontdict=font1, verticalalignment='center')\n plt.tight_layout()\n plt.savefig('./Figure/TC_dy_sampletmp.eps')\n plt.savefig('./Figure/TC_dy_sampletmp.png')\n plt.show()\n\n\n<function token>\n<function token>\n\n\ndef AGE_diffT(row, col, num, fig):\n data = np.load('./diffT_mae.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mae_02 = data[(ID * sumnum).astype(int), 1]\n mae_04 = data[(ID * sumnum).astype(int), 2]\n mae_08 = data[(ID * sumnum).astype(int), 3]\n mae_10 = data[(ID * sumnum).astype(int), 4]\n mae_15 = data[(ID * sumnum).astype(int), 5]\n mae_20 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mae_02, linewidth=2, marker='.', markersize=8, label='MAE_02')\n ax.plot(ID, mae_04, linewidth=2, marker='*', markersize=8, label='MAE_04')\n ax.plot(ID, mae_08, linewidth=2, marker='x', markersize=8, label='MAE_08')\n ax.plot(ID, mae_10, linewidth=2, marker='^', markersize=8, label='MAE_10')\n ax.plot(ID, mae_20, linewidth=2, marker='o', markersize=8, label='MAE_20')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef AGE_diffT(row, col, num, fig):\n data = np.load('./diffT_mae.npy').astype('float32')\n ID = data[:, 0]\n sumnum = data.shape[0]\n ID = np.linspace(0.002, 0.9999, 20)\n print(ID.shape)\n print(ID * sumnum)\n mae_02 = data[(ID * sumnum).astype(int), 1]\n mae_04 = data[(ID * sumnum).astype(int), 2]\n mae_08 = data[(ID * sumnum).astype(int), 3]\n mae_10 = data[(ID * sumnum).astype(int), 4]\n mae_15 = data[(ID * sumnum).astype(int), 5]\n mae_20 = data[(ID * sumnum).astype(int), 6]\n figsize = 8, 6\n ax = fig.add_subplot(row, col, num)\n ax.plot(ID, mae_02, linewidth=2, marker='.', markersize=8, label='MAE_02')\n ax.plot(ID, mae_04, linewidth=2, marker='*', markersize=8, label='MAE_04')\n ax.plot(ID, mae_08, linewidth=2, marker='x', markersize=8, label='MAE_08')\n ax.plot(ID, mae_10, linewidth=2, marker='^', markersize=8, label='MAE_10')\n ax.plot(ID, mae_20, linewidth=2, marker='o', markersize=8, label='MAE_20')\n ax.axhline(y=data[int(ID[19] * sumnum), 1], ls='--', c='grey', linewidth=1)\n ax.tick_params(labelsize=15)\n font1 = {'weight': 'semibold', 'size': 20}\n ax.set_xlabel('Coverage', fontdict=font1)\n ax.set_ylabel('Risk (MAE)', fontdict=font1)\n ax.legend(loc='lower right', fontsize=15)\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,483 |
7753ccc9f6d0a83e6cb4f89e579a65c476534b9d
|
# this works for Pycharm
from HYBRID_METHOD._functions_hybrid_methods import soa_with_ocsvm
from HYBRID_METHOD._constants import SS_TOMEK, SS_SMOTE, SS_ADASYN, SS_ENN, SS_ROS
from HYBRID_METHOD._constants import SAMPLES_RATIO_02, SAMPLES_RATIO_03
from HYBRID_METHOD._constants import CLF_SVC
# this works for Spyder
# import sys
# sys.path.insert(0, 'D:/pGnip/diplomovka_predikcia/HYBRID_METHOD')
# from _functions_hybrid_methods import hybrid_method_with_ocsvm
# from _constants import SAMPLES_RATIO_01, SAMPLES_RATIO_02, SAMPLES_RATIO_03
# from _constants import SS_ADASYN, SS_SMOTE, SS_TOMEK, SS_ENN, SS_ROS
# from _constants import CLF_SVC
# samples ratio 39:1
soa_with_ocsvm(SAMPLES_RATIO_02, SS_ROS, CLF_SVC)
soa_with_ocsvm(SAMPLES_RATIO_02, SS_ENN, CLF_SVC)
soa_with_ocsvm(SAMPLES_RATIO_02, SS_SMOTE, CLF_SVC)
soa_with_ocsvm(SAMPLES_RATIO_02, SS_ADASYN, CLF_SVC)
soa_with_ocsvm(SAMPLES_RATIO_02, SS_TOMEK, CLF_SVC)
# samples ratio 50:1
soa_with_ocsvm(SAMPLES_RATIO_03, SS_ROS, CLF_SVC)
soa_with_ocsvm(SAMPLES_RATIO_03, SS_ENN, CLF_SVC)
soa_with_ocsvm(SAMPLES_RATIO_03, SS_SMOTE, CLF_SVC)
soa_with_ocsvm(SAMPLES_RATIO_03, SS_ADASYN, CLF_SVC)
soa_with_ocsvm(SAMPLES_RATIO_03, SS_TOMEK, CLF_SVC)
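
# ---------------------------------------------------------------------------
# Hedged sketch only: soa_with_ocsvm is defined elsewhere in HYBRID_METHOD, so
# the helper below merely illustrates the usual "oversample, then filter the
# synthetic points with a One-Class SVM, then fit the classifier" hybrid.
# The function name, nu value, and pipeline order are assumptions, not the
# project's actual API.
def hybrid_ocsvm_sketch(X, y, minority_label=1):
    from imblearn.over_sampling import SMOTE
    from sklearn.svm import OneClassSVM, SVC
    X_res, y_res = SMOTE().fit_resample(X, y)                 # rebalance the classes
    ocsvm = OneClassSVM(nu=0.1).fit(X[y == minority_label])   # learn the minority region
    keep = ocsvm.predict(X_res) == 1                          # keep inliers only
    return SVC().fit(X_res[keep], y_res[keep])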
|
[
"\n# this works for Pycharm\nfrom HYBRID_METHOD._functions_hybrid_methods import soa_with_ocsvm\nfrom HYBRID_METHOD._constants import SS_TOMEK, SS_SMOTE, SS_ADASYN, SS_ENN, SS_ROS\nfrom HYBRID_METHOD._constants import SAMPLES_RATIO_02, SAMPLES_RATIO_03\nfrom HYBRID_METHOD._constants import CLF_SVC\n\n# this works for Spyder\n# import sys\n# sys.path.insert(0, 'D:/pGnip/diplomovka_predikcia/HYBRID_METHOD')\n# from _functions_hybrid_methods import hybrid_method_with_ocsvm\n# from _constants import SAMPLES_RATIO_01, SAMPLES_RATIO_02, SAMPLES_RATIO_03\n# from _constants import SS_ADASYN, SS_SMOTE, SS_TOMEK, SS_ENN, SS_ROS\n# from _constants import CLF_SVC\n\n# samples ratio 39:1\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_ROS, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_ENN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_SMOTE, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_ADASYN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_TOMEK, CLF_SVC)\n\n# samples ratio 50:1\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_ROS, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_ENN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_SMOTE, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_ADASYN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_TOMEK, CLF_SVC)\n",
"from HYBRID_METHOD._functions_hybrid_methods import soa_with_ocsvm\nfrom HYBRID_METHOD._constants import SS_TOMEK, SS_SMOTE, SS_ADASYN, SS_ENN, SS_ROS\nfrom HYBRID_METHOD._constants import SAMPLES_RATIO_02, SAMPLES_RATIO_03\nfrom HYBRID_METHOD._constants import CLF_SVC\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_ROS, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_ENN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_SMOTE, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_ADASYN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_TOMEK, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_ROS, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_ENN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_SMOTE, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_ADASYN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_TOMEK, CLF_SVC)\n",
"<import token>\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_ROS, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_ENN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_SMOTE, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_ADASYN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_02, SS_TOMEK, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_ROS, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_ENN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_SMOTE, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_ADASYN, CLF_SVC)\nsoa_with_ocsvm(SAMPLES_RATIO_03, SS_TOMEK, CLF_SVC)\n",
"<import token>\n<code token>\n"
] | false |
98,484 |
4634f73ce5f3164d58d6e330cb200fa3c2351f61
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
# input_file = sys.argv[1]
# output_file = sys.argv[2]
with open('./input_file.csv','r') as filereader:
with open('./output_file.csv','w') as filewriter:
        header = filereader.readline() # read the first (header) line with readline and assign it to header
        header = header.strip() # strip the trailing newline/whitespace
        header_list = header.split(',') # split the header fields on commas
        print(header_list)
        filewriter.write(','.join(map(str,header_list))+'\n') # join header_list with commas and write it to the output file
        for row in filereader: # note: readline already consumed the header, so this loop reads only the remaining rows
row = row.strip()
row_list = row.split('-')
print(row_list)
filewriter.write(','.join(map(str,row_list))+'\n')
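
# Hedged alternative sketch: the csv module copes with quoted fields and
# embedded delimiters that a plain str.split() would break on. The file paths
# and the '-'-delimited body rows are taken from the script above.
def rewrite_with_csv_module(in_path='./input_file.csv', out_path='./output_file.csv'):
    import csv
    with open(in_path, 'r', newline='') as src, open(out_path, 'w', newline='') as dst:
        writer = csv.writer(dst)
        writer.writerow(src.readline().strip().split(','))  # comma-delimited header
        for line in src:
            writer.writerow(line.strip().split('-'))        # '-'-delimited body rows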
|
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys\n\n# input_file = sys.argv[1]\n# output_file = sys.argv[2]\n\nwith open('./input_file.csv','r') as filereader:\n with open('./output_file.csv','w') as filewriter:\n header = filereader.readline() #使用readline读取第一行标题行并赋给header\n header = header.strip() #清除空格\n header_list = header.split(',') #使用逗号将元素分隔开\n print(header_list)\n filewriter.write(','.join(map(str,header_list))+'\\n') #将header_list的元素按逗号连接起来写入output_file文件里\n for row in filereader: #注意:因为第一行已经通过readline读取完了,此处的for循环则只读取剩下的内容\n row = row.strip()\n row_list = row.split('-')\n print(row_list)\n filewriter.write(','.join(map(str,row_list))+'\\n')\n",
"import sys\nwith open('./input_file.csv', 'r') as filereader:\n with open('./output_file.csv', 'w') as filewriter:\n header = filereader.readline()\n header = header.strip()\n header_list = header.split(',')\n print(header_list)\n filewriter.write(','.join(map(str, header_list)) + '\\n')\n for row in filereader:\n row = row.strip()\n row_list = row.split('-')\n print(row_list)\n filewriter.write(','.join(map(str, row_list)) + '\\n')\n",
"<import token>\nwith open('./input_file.csv', 'r') as filereader:\n with open('./output_file.csv', 'w') as filewriter:\n header = filereader.readline()\n header = header.strip()\n header_list = header.split(',')\n print(header_list)\n filewriter.write(','.join(map(str, header_list)) + '\\n')\n for row in filereader:\n row = row.strip()\n row_list = row.split('-')\n print(row_list)\n filewriter.write(','.join(map(str, row_list)) + '\\n')\n",
"<import token>\n<code token>\n"
] | false |
98,485 |
bd20476ac9002a1ec10900bfca8b2d8ef742d63a
|
from itertools import combinations_with_replacement
from header import schema
from sklearn.svm import LinearSVC
import numpy as np
import pandas as pd
import pydot
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.externals.six import StringIO
from sklearn.linear_model import LinearRegression, Lasso, ElasticNetCV, LassoCV, ElasticNet
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor, export_graphviz
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor, NearestNeighbors
def load_data(filename='../rsrc/data.csv', numeric=True):
df = pd.read_csv(filename, names=schema)
idx = df.communityname
if numeric:
return df._get_numeric_data()
else:
return df
def select_columns(df, y_col, x_cols=None, quadratic=True):
'Returns a dataset in form x, y'
x = df.drop(y_col, 1) if x_cols is None else df[x_cols]
y = df[y_col]
return np.array(x), np.array(y)
def partition_dataset(dataset, split_prop=.8):
xs, ys = dataset
data_size = len(xs)
split = int(data_size * split_prop)
train = xs[:split], ys[:split]
test = xs[split:], ys[split:]
return train, test
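
# Hedged aside: sklearn's train_test_split covers the same job; shuffle=False
# reproduces the sequential split performed above.
def partition_dataset_sklearn(dataset, split_prop=.8):
    from sklearn.model_selection import train_test_split
    xs, ys = dataset
    x_tr, x_te, y_tr, y_te = train_test_split(
        xs, ys, train_size=split_prop, shuffle=False)
    return (x_tr, y_tr), (x_te, y_te)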
def quadratic_features(df):
df = df.copy()
cols = df.columns.values
for a, b in combinations_with_replacement(cols, 2):
feat = a + '*' + b
df[feat] = df[a] * df[b]
return df.columns.values, df.values
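
# Hedged cross-check: PolynomialFeatures (already imported above) generates the
# same degree-2 terms (originals, squares, pairwise products) as
# quadratic_features, just without the readable 'a*b' column names.
# get_feature_names_out requires sklearn >= 1.0 (older versions expose
# get_feature_names instead).
def quadratic_features_sklearn(df):
    poly = PolynomialFeatures(degree=2, include_bias=False)
    values = poly.fit_transform(df.values)
    names = poly.get_feature_names_out(df.columns.values)
    return names, values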
def important_features(features, coeffs):
return sorted(zip(features, coeffs), key=lambda x: x[1], reverse=True)
def show(vec):
for x in vec:
        print(x)
def show_linear_results(feats, model):
imp = important_features(feats, model.coef_)
    print('Associated features: ')
    show(imp[:20])
    print('Disassociated features: ')
    show(imp[-20:][::-1])
def train_model(feats, x, y, model, split_ratio=.8):
    split = int(len(y) * split_ratio)  # slice indices must be integers
    model.fit(x[:split], y[:split])
    pred = model.predict(x[split:])
    # sq_err = err ** 2
    # print('Average Error: ', np.average(np.abs(err)))
    # print('Avg. Rel. Error: ', np.average(np.abs(err)) / np.average(y[split:]))
    print('Mean Absolute Error')
    print(mean_absolute_error(pred, y[split:]))
    print('RMSE')
    print(mean_squared_error(pred, y[split:]) ** 0.5)
if type(model) in [Lasso, LinearRegression]:
show_linear_results(feats, model)
return model
def lin_reg(feats, x, y):
m = LinearRegression(n_jobs=-1, normalize=True)
return train_model(feats, x, y, m)
def lasso(feats, x, y, alpha=.0005, iters=3000):
m = Lasso(alpha=alpha, max_iter=iters, normalize=True)
# m = LassoCV(eps=.00001, n_jobs=-1)
return train_model(feats, x, y, m)
def enet(feats, x, y, alpha=.0005, iters=3000):
m = ElasticNetCV()
return train_model(feats, x, y, m)
def regtree(feats, x, y):
m = DecisionTreeRegressor(max_depth=3)
m = train_model(feats, x, y, m)
return m
def rf(feats, x, y):
m = RandomForestRegressor()
m = train_model(feats, x, y, m)
return m
def svm(feats, x, y):
m = SVR()
m = train_model(feats, x, y, m)
return m
def knn(feats, x, y):
m = KNeighborsRegressor(n_neighbors=10, weights='distance', leaf_size=20)
m = train_model(feats, x, y, m)
return m
def experiment(df, dv, model=lasso, feats='quadratic'):
if feats == 'linear':
IVs = df.drop(dv, 1).values
feats = df.drop(dv, 1).columns.values
else:
feats, IVs = quadratic_features(df.drop(dv, 1))
y = df[dv].values
return model(feats, IVs, y)
def show_tree(clf, feature_names, file_name):
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data,
feature_names=feature_names)
graph = pydot.graph_from_dot_data(dot_data.getvalue(),)
graph.write_pdf(file_name)
'''('MalePctDivorce*PctPersDenseHous', 0.30729275274456691)
('racepctblack*PctHousLess3BR', 0.24991535001964477)
('HispPerCap*NumStreet', 0.14249808478068951)
('PctHousNoPhone*MedRent', 0.094030129820233843)
('pctWPubAsst*whitePerCap', 0.086722631950222673)
('pctUrban*PctIlleg', 0.06935651216941284)
('PctIlleg*PctHousLess3BR', 0.063355648990797805)
('MalePctDivorce*PctIlleg', 0.056406095389051503)
('PctHousLess3BR*HousVacant', 0.050754318447706268)
('PctIlleg*PctSameCity85', 0.044622293970025581)
('MalePctDivorce*PctVacantBoarded', 0.041138115481024327)
('MalePctDivorce*HousVacant', 0.035023958900577037)
('FemalePctDiv*MedRentPctHousInc', 0.032977637133629764)
('pctWPubAsst*MalePctDivorce', 0.019802316010702738)
('FemalePctDiv*PctIlleg', 0.019368969618522035)
('TotalPctDiv*MedRentPctHousInc', 0.015073230757971201)
('whitePerCap*PctPopUnderPov', 0.0099085617576258628)
'''
def binarize(Y, percentile):
thresh = np.percentile(Y, percentile)
if percentile < 50:
return Y < thresh
return Y > thresh
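
# Worked example for binarize: np.percentile([1, 2, 3, 4, 5], 80) == 4.2 under
# linear interpolation, so binarize(np.array([1, 2, 3, 4, 5]), 80) yields
# [False, False, False, False, True] (only 5 exceeds the threshold).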
def rmse(pred, truth):
    # np.truth does not exist; compute the actual root-mean-squared error
    return np.sqrt(np.mean((np.asarray(pred) - np.asarray(truth)) ** 2))
from collections import defaultdict
class NNLR:
def __init__(self, k=5, rad=2, mode='k', feat_names=None):
self.mode = mode
self.k = k
        self.NN = NearestNeighbors(n_neighbors=k, radius=rad)
def fit(self, X, Y):
self.X = X
self.Y = Y
self.NN.fit(X)
self.active=defaultdict(int)
    def nn_lin(self, testX, neighbors):
        # Returns the plain neighbor mean; the local DecisionTreeRegressor fit
        # below was unreachable dead code from an earlier experiment, kept here
        # commented out for reference.
        return np.mean(self.Y[neighbors])
        # l = DecisionTreeRegressor()
        # l.fit(self.X[neighbors], self.Y[neighbors])
        # for idx in np.where(l.coef_)[0]:
        #     self.active[idx] += 1
        # return l.predict([testX])[0]
def predict(self, X):
if self.mode == 'k':
neighbors = self.NN.kneighbors(X)[1]
elif self.mode == 'rad':
neighbors = self.NN.radius_neighbors(X)[1]
return np.array([self.nn_lin(Xtst, nbr) for (Xtst, nbr) in zip(X, neighbors)])
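
# Hedged equivalence check: with nn_lin returning the plain neighbor mean,
# NNLR in 'k' mode should agree with a uniform-weight KNeighborsRegressor
# (up to tie-breaking among equidistant neighbors).
def nnlr_sanity_check(X, Y, k=5):
    baseline = KNeighborsRegressor(n_neighbors=k, weights='uniform').fit(X, Y)
    nnlr = NNLR(k=k)
    nnlr.fit(X, Y)
    return np.allclose(baseline.predict(X), nnlr.predict(X))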
def tst(X, Y, k=3, rad=4, mode='k'):
trX = X[:-1200]
trY = Y[:-1200]
tstX = X[-400:]
tstY = Y[-400:]
nnlr = NNLR(k, rad, mode)
nnlr.fit(trX, trY)
pred = nnlr.predict(trX)
    print('Training Set')
    print('Root Mean Squared Error')
    print(mean_squared_error(trY, pred) ** 0.5)
    print('Mean Absolute Error')
    print(mean_absolute_error(trY, pred))
    # print(list(zip(pred, trX))[:5])
    print(nnlr.active)
    pred = nnlr.predict(tstX)
    print('Test Set')
    print('Root Mean Squared Error')
    print(mean_squared_error(tstY, pred) ** 0.5)
    print('Mean Absolute Error')
    print(mean_absolute_error(tstY, pred))
    # print(list(zip(pred, tstY))[:5])
    print(nnlr.active)
def tst2(X, Y, k=3, rad=4, mode='k'):
rmse = []
ame = []
for k in range(1,20):
trX = X[:-1200]
trY = Y[:-1200]
tstX = X[-400:]
tstY = Y[-400:]
nnlr = NNLR(k, rad, mode)
nnlr.fit(trX, trY)
pred = nnlr.predict(tstX)
        ms = mean_squared_error(tstY, pred) ** 0.5
        m = mean_absolute_error(tstY, pred)
        # print(list(zip(pred, tstY))[:5])
        print(nnlr.active)
ame.append(m)
rmse.append(ms)
plt.title('AME vs k')
plt.xlabel('k')
plt.ylabel('Holdout Average Mean Error')
plt.plot(ame)
plt.show()
plt.title('RMSE vs k')
plt.xlabel('k')
plt.ylabel('Holdout RMSE')
plt.plot(rmse)
plt.show()
import matplotlib.pyplot as plt
cm = plt.cm.get_cmap('RdYlBu')
def cscatter(x,y,z, xl, yl):
# z = df.ViolentCrimesPerPop
# xz = df.racepctblack*df.racePctHisp
# xz=df.racePctWhite*df.racepctblack
# xy = df.PctPopUnderPov
    sc = plt.scatter(x, y, c=z, cmap=cm)  # 'coolwarm' was undefined; use the colormap defined above
plt.xlim(0,1.02)
plt.ylim(-.01,1.01)
# sc = plt.scatter(xy, xz, color=colors)
plt.colorbar(sc,label='Violent Crime Rate',shrink=.5)
plt.xlabel(xl)
plt.ylabel(yl)
# plt.show()
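
# Hedged usage sketch for cscatter; df and the column choices are assumptions
# lifted from the commented-out lines above:
# cscatter(df.PctPopUnderPov, df.racepctblack * df.racePctHisp,
#          df.ViolentCrimesPerPop, 'PctPopUnderPov', 'racepctblack*racePctHisp')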
|
[
"from itertools import combinations_with_replacement\nfrom header import schema\nfrom sklearn.svm import LinearSVC\nimport numpy as np\nimport pandas as pd\nimport pydot\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\n\n\nfrom sklearn.externals.six import StringIO\nfrom sklearn.linear_model import LinearRegression, Lasso, ElasticNetCV, LassoCV, ElasticNet\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.tree import DecisionTreeRegressor, export_graphviz\nfrom sklearn.svm import SVR\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.neighbors import KNeighborsRegressor, NearestNeighbors\n\n\ndef load_data(filename='../rsrc/data.csv', numeric=True):\n df = pd.read_csv(filename, names=schema)\n idx = df.communityname\n if numeric:\n return df._get_numeric_data()\n else:\n return df\n\n\ndef select_columns(df, y_col, x_cols=None, quadratic=True):\n 'Returns a dataset in form x, y'\n x = df.drop(y_col, 1) if x_cols is None else df[x_cols]\n y = df[y_col]\n return np.array(x), np.array(y)\n\n\ndef partition_dataset(dataset, split_prop=.8):\n xs, ys = dataset\n data_size = len(xs)\n split = int(data_size * split_prop)\n train = xs[:split], ys[:split]\n test = xs[split:], ys[split:]\n return train, test\n\n\ndef quadratic_features(df):\n df = df.copy()\n cols = df.columns.values\n for a, b in combinations_with_replacement(cols, 2):\n feat = a + '*' + b\n df[feat] = df[a] * df[b]\n return df.columns.values, df.values\n\n\ndef important_features(features, coeffs):\n return sorted(zip(features, coeffs), key=lambda x: x[1], reverse=True)\n\n\ndef show(vec):\n for x in vec:\n print x\n\n\ndef show_linear_results(feats, model):\n imp = important_features(feats, model.coef_)\n print 'Associated features: '\n show(imp[:20])\n print 'Disassociated features: '\n show(imp[-20:][::-1])\n\n\ndef train_model(feats, x, y, model, split_ratio=.8):\n split = len(y) * split_ratio\n model.fit(x[:split], y[:split])\n\n pred = model.predict(x[split:])\n # sq_err = err ** 2\n # print 'Average Error: ', np.average(np.abs(err))\n # print 'Avg. Rel. 
Error: ', np.average(np.abs(err)) / np.average(y[split:])\n print 'Mean Absolute Error'\n print mean_absolute_error(pred, y[split:])\n print 'RMSE'\n print mean_squared_error(pred, y[split:])**.5\n if type(model) in [Lasso, LinearRegression]:\n show_linear_results(feats, model)\n return model\n\n\ndef lin_reg(feats, x, y):\n m = LinearRegression(n_jobs=-1, normalize=True)\n return train_model(feats, x, y, m)\n\n\ndef lasso(feats, x, y, alpha=.0005, iters=3000):\n m = Lasso(alpha=alpha, max_iter=iters, normalize=True)\n # m = LassoCV(eps=.00001, n_jobs=-1)\n return train_model(feats, x, y, m)\n\n\ndef enet(feats, x, y, alpha=.0005, iters=3000):\n m = ElasticNetCV()\n return train_model(feats, x, y, m)\n\n\ndef regtree(feats, x, y):\n m = DecisionTreeRegressor(max_depth=3)\n m = train_model(feats, x, y, m)\n return m\n\n\ndef rf(feats, x, y):\n m = RandomForestRegressor()\n m = train_model(feats, x, y, m)\n return m\n\n\ndef svm(feats, x, y):\n m = SVR()\n m = train_model(feats, x, y, m)\n return m\n\n\ndef knn(feats, x, y):\n m = KNeighborsRegressor(n_neighbors=10, weights='distance', leaf_size=20)\n m = train_model(feats, x, y, m)\n return m\n\n\ndef experiment(df, dv, model=lasso, feats='quadratic'):\n if feats == 'linear':\n IVs = df.drop(dv, 1).values\n feats = df.drop(dv, 1).columns.values\n else:\n feats, IVs = quadratic_features(df.drop(dv, 1))\n y = df[dv].values\n return model(feats, IVs, y)\n\n\ndef show_tree(clf, feature_names, file_name):\n dot_data = StringIO()\n export_graphviz(clf, out_file=dot_data,\n feature_names=feature_names)\n graph = pydot.graph_from_dot_data(dot_data.getvalue(),)\n graph.write_pdf(file_name)\n\n'''('MalePctDivorce*PctPersDenseHous', 0.30729275274456691)\n('racepctblack*PctHousLess3BR', 0.24991535001964477)\n('HispPerCap*NumStreet', 0.14249808478068951)\n('PctHousNoPhone*MedRent', 0.094030129820233843)\n('pctWPubAsst*whitePerCap', 0.086722631950222673)\n('pctUrban*PctIlleg', 0.06935651216941284)\n('PctIlleg*PctHousLess3BR', 0.063355648990797805)\n('MalePctDivorce*PctIlleg', 0.056406095389051503)\n('PctHousLess3BR*HousVacant', 0.050754318447706268)\n('PctIlleg*PctSameCity85', 0.044622293970025581)\n('MalePctDivorce*PctVacantBoarded', 0.041138115481024327)\n('MalePctDivorce*HousVacant', 0.035023958900577037)\n('FemalePctDiv*MedRentPctHousInc', 0.032977637133629764)\n('pctWPubAsst*MalePctDivorce', 0.019802316010702738)\n('FemalePctDiv*PctIlleg', 0.019368969618522035)\n('TotalPctDiv*MedRentPctHousInc', 0.015073230757971201)\n('whitePerCap*PctPopUnderPov', 0.0099085617576258628)\n'''\n\n\ndef binarize(Y, percentile):\n thresh = np.percentile(Y, percentile)\n if percentile < 50:\n return Y < thresh\n return Y > thresh\n\n\ndef rmse(pred, truth):\n np.abs(pred) - np.truth\n\nfrom collections import defaultdict\nclass NNLR:\n\n def __init__(self, k=5, rad=2, mode='k', feat_names=None):\n self.mode = mode\n self.k = k\n self.NN = NearestNeighbors(k, radius=rad)\n \n def fit(self, X, Y):\n self.X = X\n self.Y = Y\n self.NN.fit(X)\n self.active=defaultdict(int)\n def nn_lin(self, testX, neighbors):\n l = DecisionTreeRegressor()\n return np.mean(self.Y[neighbors])\n l.fit(self.X[neighbors], self.Y[neighbors])\n # for idx in np.where(l.coef_)[0]:\n # self.active[idx]+=1\n return l.predict([testX])[0]\n\n def predict(self, X):\n if self.mode == 'k':\n neighbors = self.NN.kneighbors(X)[1]\n elif self.mode == 'rad':\n neighbors = self.NN.radius_neighbors(X)[1]\n return np.array([self.nn_lin(Xtst, nbr) for (Xtst, nbr) in zip(X, neighbors)])\n\n\ndef tst(X, Y, k=3, 
rad=4, mode='k'):\n trX = X[:-1200]\n trY = Y[:-1200]\n tstX = X[-400:]\n tstY = Y[-400:]\n\n nnlr = NNLR(k, rad, mode)\n\n nnlr.fit(trX, trY)\n\n pred = nnlr.predict(trX)\n print 'Training Set'\n print 'Root Mean Squared Error'\n print mean_squared_error(trY, pred)**.5\n print 'Root Mean Error'\n print mean_absolute_error(trY, pred)\n # print zip(pred, trX)[:5]\n print nnlr.active\n\n pred = nnlr.predict(tstX)\n print 'Test Set'\n print 'Root Mean Squared Error'\n print mean_squared_error(tstY, pred)**.5\n print 'Root Mean Error'\n print mean_absolute_error(tstY, pred)\n # print zip(pred, tstY)[:5]\n print nnlr.active\ndef tst2(X, Y, k=3, rad=4, mode='k'):\n rmse = []\n ame = []\n for k in range(1,20):\n trX = X[:-1200]\n trY = Y[:-1200]\n tstX = X[-400:]\n tstY = Y[-400:]\n\n nnlr = NNLR(k, rad, mode)\n\n nnlr.fit(trX, trY)\n\n pred = nnlr.predict(tstX)\n ms= mean_squared_error(tstY, pred)**.5\n m= mean_absolute_error(tstY, pred)\n # print zip(pred, tstY)[:5]\n print nnlr.active\n ame.append(m)\n rmse.append(ms)\n plt.title('AME vs k')\n plt.xlabel('k')\n plt.ylabel('Holdout Average Mean Error')\n plt.plot(ame)\n plt.show()\n plt.title('RMSE vs k')\n plt.xlabel('k')\n plt.ylabel('Holdout RMSE')\n plt.plot(rmse)\n plt.show()\nimport matplotlib.pyplot as plt\ncm = plt.cm.get_cmap('RdYlBu')\ndef cscatter(x,y,z, xl, yl):\n # z = df.ViolentCrimesPerPop\n # xz = df.racepctblack*df.racePctHisp\n # xz=df.racePctWhite*df.racepctblack\n # xy = df.PctPopUnderPov\n sc = plt.scatter(x, y, c=z, cmap=coolwarm)\n plt.xlim(0,1.02)\n plt.ylim(-.01,1.01)\n # sc = plt.scatter(xy, xz, color=colors)\n plt.colorbar(sc,label='Violent Crime Rate',shrink=.5)\n plt.xlabel(xl)\n plt.ylabel(yl)\n # plt.show()\n"
] | true |
98,486 |
36e27fd48fac1249e4dae40edb129b48829bed8e
|
# import sqlalchemy
# from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# import plotly
# import plotly.graph_objs as go
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, jsonify, render_template, url_for, flash, redirect
import pandas as pd
engine = create_engine("sqlite:///db/Performance.sqlite")
conn = engine.connect()
session=Session(engine)
# Define the app as a flask app
app= Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db/Performance.sqlite'  # align with the engine URI above; 'db.Performance' looked like a typo
# Define the home
@app.route("/")
def index():
"""Return the homepage."""
return render_template("index.html")
@app.route('/scores')
def scores():
scores_df = pd.read_sql("SELECT * FROM scores", engine)
scores_df['Mean Scale Score'] = pd.to_numeric(scores_df['Mean Scale Score'], errors='coerce')
    # return scores_df.to_json(None, 'records')
    # the return must sit inside scores(); at module level it is a SyntaxError
    return (scores_df.groupby('State/District/School')['Mean Scale Score'].mean()).to_json(None, 'split')
# # return jsonify(['score1', 'score2'])
# #Change just to trigger restart
@app.route('/retention')
def retention():
retention_df = pd.read_sql("SELECT * FROM retention", engine)
    return (retention_df.groupby("Job Classification")['Mean'].mean()).to_json(None, 'split')  # retention_df is a DataFrame, not callable; groupby was intended
# @app.route('budget')
# @app.route('salary')
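
# Hedged smoke-test sketch: Flask's built-in test client exercises a route
# without starting the server; it assumes index.html renders without the DB.
def smoke_test_index():
    with app.test_client() as client:
        assert client.get('/').status_code == 200
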
if __name__ == "__main__":
app.run(debug=True)
|
[
"\n# import sqlalchemy\n# from sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n# import plotly\n# import plotly.graph_objs as go\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flask import Flask, jsonify, render_template, url_for, flash, redirect\nimport pandas as pd\n\nengine = create_engine(\"sqlite:///db/Performance.sqlite\")\n\n\nconn = engine.connect()\nsession=Session(engine)\n\n# Define the app as a flask app\napp= Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.Performance'\n\n# Define the home\[email protected](\"/\")\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template(\"index.html\")\n\[email protected]('/scores')\ndef scores():\n scores_df = pd.read_sql(\"SELECT * FROM scores\", engine)\n scores_df['Mean Scale Score'] = pd.to_numeric(scores_df['Mean Scale Score'], errors='coerce')\n# # return scores_df.to_json(None, 'records')\nreturn (scores_df.groupby('State/District/School')['Mean Scale Score'].mean()).to_json(None, 'split')\n# # return jsonify(['score1', 'score2'])\n# #Change just to trigger restart\n\[email protected]('/retention')\ndef retention():\n retention_df = pd.read_sql(\"SELECT * FROM retention\", engine)\n return (retention_df(\"Job Classification\")['Mean'].mean()).to_json(None, 'split')\n\n\n# @app.route('budget')\n\n# @app.route('salary')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n \n \n",
"from sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import Flask, jsonify, render_template, url_for, flash, redirect\nimport pandas as pd\nengine = create_engine('sqlite:///db/Performance.sqlite')\nconn = engine.connect()\nsession = Session(engine)\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.Performance'\n\n\[email protected]('/')\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template('index.html')\n\n\[email protected]('/scores')\ndef scores():\n scores_df = pd.read_sql('SELECT * FROM scores', engine)\n scores_df['Mean Scale Score'] = pd.to_numeric(scores_df[\n 'Mean Scale Score'], errors='coerce')\n\n\nreturn scores_df.groupby('State/District/School')['Mean Scale Score'].mean(\n ).to_json(None, 'split')\n\n\[email protected]('/retention')\ndef retention():\n retention_df = pd.read_sql('SELECT * FROM retention', engine)\n return retention_df('Job Classification')['Mean'].mean().to_json(None,\n 'split')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\nengine = create_engine('sqlite:///db/Performance.sqlite')\nconn = engine.connect()\nsession = Session(engine)\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.Performance'\n\n\[email protected]('/')\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template('index.html')\n\n\[email protected]('/scores')\ndef scores():\n scores_df = pd.read_sql('SELECT * FROM scores', engine)\n scores_df['Mean Scale Score'] = pd.to_numeric(scores_df[\n 'Mean Scale Score'], errors='coerce')\n\n\nreturn scores_df.groupby('State/District/School')['Mean Scale Score'].mean(\n ).to_json(None, 'split')\n\n\[email protected]('/retention')\ndef retention():\n retention_df = pd.read_sql('SELECT * FROM retention', engine)\n return retention_df('Job Classification')['Mean'].mean().to_json(None,\n 'split')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template('index.html')\n\n\[email protected]('/scores')\ndef scores():\n scores_df = pd.read_sql('SELECT * FROM scores', engine)\n scores_df['Mean Scale Score'] = pd.to_numeric(scores_df[\n 'Mean Scale Score'], errors='coerce')\n\n\nreturn scores_df.groupby('State/District/School')['Mean Scale Score'].mean(\n ).to_json(None, 'split')\n\n\[email protected]('/retention')\ndef retention():\n retention_df = pd.read_sql('SELECT * FROM retention', engine)\n return retention_df('Job Classification')['Mean'].mean().to_json(None,\n 'split')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template('index.html')\n\n\[email protected]('/scores')\ndef scores():\n scores_df = pd.read_sql('SELECT * FROM scores', engine)\n scores_df['Mean Scale Score'] = pd.to_numeric(scores_df[\n 'Mean Scale Score'], errors='coerce')\n\n\n<code token>\n\n\[email protected]('/retention')\ndef retention():\n retention_df = pd.read_sql('SELECT * FROM retention', engine)\n return retention_df('Job Classification')['Mean'].mean().to_json(None,\n 'split')\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template('index.html')\n\n\[email protected]('/scores')\ndef scores():\n scores_df = pd.read_sql('SELECT * FROM scores', engine)\n scores_df['Mean Scale Score'] = pd.to_numeric(scores_df[\n 'Mean Scale Score'], errors='coerce')\n\n\n<code token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template('index.html')\n\n\n<function token>\n<code token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<code token>\n<function token>\n<code token>\n"
] | false |
98,487 |
72001855bff403162c70cd6a03ca0948382eb82e
|
# python3 code/pkc_cython/setup.py build_ext --inplace
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import pkc_cython
DTYPE = np.intc
def plot_graph(G):
nx.draw(G, with_labels=True)
plt.show()
G = nx.duplication_divergence_graph(10, 0.5, seed=10)
# plot_graph(G)
true_kcore = nx.core_number(G)
# Extract nodes and lists of neighbors from G as numpy arrays
n = G.number_of_nodes()
nodes = np.arange(n, dtype=DTYPE)
neighbors = [list(G.neighbors(n)) for n in nodes]
deg_init = np.array([len(x) for x in neighbors], dtype=DTYPE)
max_n_neighbors = max(len(x) for x in neighbors)
neighbors = [x + [-1]*(max_n_neighbors - len(x)) for x in neighbors]
neighbors = np.array(neighbors, dtype=DTYPE)
pkc_out = pkc_cython.pkc(deg_init, deg_init, neighbors)
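
# Hedged cross-check sketch: the parallel k-core numbers should agree with
# networkx's reference implementation. The exact shape of pkc_out is an
# assumption here; adapt the comparison to whatever pkc_cython.pkc returns.
def check_against_networkx(core_values):
    expected = np.array([true_kcore[v] for v in nodes], dtype=DTYPE)
    return np.array_equal(np.asarray(core_values, dtype=DTYPE), expected)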
|
[
"\n# python3 code/pkc_cython/setup.py build_ext --inplace\n\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nimport pkc_cython\n\nDTYPE = np.intc\n\ndef plot_graph(G):\n nx.draw(G, with_labels=True)\n plt.show()\n\nG = nx.duplication_divergence_graph(10, 0.5, seed=10)\n# plot_graph(G)\ntrue_kcore = nx.core_number(G)\n\n# Extract nodes and lists of neighbors from G as numpy arrays\nn = G.number_of_nodes()\nnodes = np.arange(n, dtype=DTYPE)\n\nneighbors = [list(G.neighbors(n)) for n in nodes]\ndeg_init = np.array([len(x) for x in neighbors], dtype=DTYPE)\n\nmax_n_neighbors = max(len(x) for x in neighbors)\nneighbors = [x + [-1]*(max_n_neighbors - len(x)) for x in neighbors]\nneighbors = np.array(neighbors, dtype=DTYPE)\n\npkc_out = pkc_cython.pkc(deg_init, deg_init, neighbors)\n",
"import numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport pkc_cython\nDTYPE = np.intc\n\n\ndef plot_graph(G):\n nx.draw(G, with_labels=True)\n plt.show()\n\n\nG = nx.duplication_divergence_graph(10, 0.5, seed=10)\ntrue_kcore = nx.core_number(G)\nn = G.number_of_nodes()\nnodes = np.arange(n, dtype=DTYPE)\nneighbors = [list(G.neighbors(n)) for n in nodes]\ndeg_init = np.array([len(x) for x in neighbors], dtype=DTYPE)\nmax_n_neighbors = max(len(x) for x in neighbors)\nneighbors = [(x + [-1] * (max_n_neighbors - len(x))) for x in neighbors]\nneighbors = np.array(neighbors, dtype=DTYPE)\npkc_out = pkc_cython.pkc(deg_init, deg_init, neighbors)\n",
"<import token>\nDTYPE = np.intc\n\n\ndef plot_graph(G):\n nx.draw(G, with_labels=True)\n plt.show()\n\n\nG = nx.duplication_divergence_graph(10, 0.5, seed=10)\ntrue_kcore = nx.core_number(G)\nn = G.number_of_nodes()\nnodes = np.arange(n, dtype=DTYPE)\nneighbors = [list(G.neighbors(n)) for n in nodes]\ndeg_init = np.array([len(x) for x in neighbors], dtype=DTYPE)\nmax_n_neighbors = max(len(x) for x in neighbors)\nneighbors = [(x + [-1] * (max_n_neighbors - len(x))) for x in neighbors]\nneighbors = np.array(neighbors, dtype=DTYPE)\npkc_out = pkc_cython.pkc(deg_init, deg_init, neighbors)\n",
"<import token>\n<assignment token>\n\n\ndef plot_graph(G):\n nx.draw(G, with_labels=True)\n plt.show()\n\n\n<assignment token>\n",
"<import token>\n<assignment token>\n<function token>\n<assignment token>\n"
] | false |
98,488 |
eedd64d28fd54fe6e6be15e3201123ad0d551195
|
import argparse
import logging
import os
import random
import time
import json
from datetime import datetime
import tempfile
import shutil
import numpy as np
import pandas as pd
import torch
from scipy.special import softmax
from torch.nn import CrossEntropyLoss
from transformers.optimization import (
AdamW, get_linear_schedule_with_warmup,
get_constant_schedule_with_warmup
)
from transformers.file_utils import (
PYTORCH_PRETRAINED_BERT_CACHE,
WEIGHTS_NAME, CONFIG_NAME
)
from tqdm import tqdm
from models.examples_to_features import (
get_dataloader_and_tensors,
models, tokenizers, DataProcessor, configs
)
from collections import defaultdict
from sklearn.metrics import (
precision_recall_fscore_support, classification_report
)
from utils.data_processing import (
EVAL_TAGS, EVAL_RELATIONS
)
from torch.utils.tensorboard import SummaryWriter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO
)
logger = logging.getLogger(__name__)
eval_logger = logging.getLogger("__scores__")
def compute_all_metrics(
sent_type_labels, sent_type_preds,
tags_sequence_labels, tags_sequence_preds,
relations_sequence_labels, relations_sequence_preds,
label2id, loss_info=None, logger=None
):
eval_tags_sequence_labels = [
(label2id['tags_sequence'][lab]) for lab in EVAL_TAGS
]
eval_relations_sequence_labels = [
(label2id['relations_sequence'][lab]) for lab in EVAL_RELATIONS
]
task_1_report = classification_report(
sent_type_labels, sent_type_preds, labels=[0, 1], output_dict=True
)
task_2_report = classification_report(
tags_sequence_labels, tags_sequence_preds,
labels=eval_tags_sequence_labels, output_dict=True
)
task_3_report = classification_report(
relations_sequence_labels, relations_sequence_preds,
labels=eval_relations_sequence_labels, output_dict=True
)
result = {}
for x in ['0', '1', 'weighted avg', 'macro avg']:
for metrics in ['precision', 'recall', 'f1-score', 'support']:
result[f"sent_type_{x.replace(' ', '-')}_{metrics}"] = \
round(task_1_report[x][metrics], 6)
id2label = {
val: key for key, val in label2id['tags_sequence'].items()
}
id2label['weighted avg'] = 'weighted-avg'
id2label['macro avg'] = 'macro-avg'
for x in eval_tags_sequence_labels + ['weighted avg', 'macro avg']:
for metrics in ['precision', 'recall', 'f1-score', 'support']:
result[f"tags_sequence_{id2label[x]}_{metrics}"] = \
round(task_2_report[str(x)][metrics], 6)
id2label = {
val: key for key, val in label2id['relations_sequence'].items()
}
id2label['weighted avg'] = 'weighted-avg'
id2label['macro avg'] = 'macro-avg'
for x in eval_relations_sequence_labels + ['weighted avg', 'macro avg']:
for metrics in ['precision', 'recall', 'f1-score', 'support']:
result[f"relations_sequence_{id2label[x]}_{metrics}"] = \
round(task_3_report[str(x)][metrics], 6)
if logger is not None:
logger.info("=====================================")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
if loss_info is not None:
for key in sorted(loss_info.keys()):
logger.info(
" %s = %s", key, str(loss_info[key])
)
return result
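

# Hedged illustration of the classification_report(output_dict=True) structure
# consumed above: keys are stringified labels plus 'macro avg'/'weighted avg',
# each mapping to precision/recall/f1-score/support.
def _report_shape_example():
    report = classification_report([0, 1, 1], [0, 1, 0], labels=[0, 1],
                                   output_dict=True)
    return report['1']['f1-score'], report['macro avg']['support']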
def evaluate(
model, device, eval_dataloader,
eval_sent_type_labels_ids,
eval_tags_sequence_labels_ids,
eval_relations_sequence_labels_ids,
label2id,
compute_metrics=True,
verbose=False, cur_train_mean_loss=None,
logger=None,
skip_every_n_examples=1
):
model.eval()
num_sent_type_labels = model.num_sent_type_labels
num_tags_sequence_labels = model.num_tags_sequence_labels
num_relations_sequence_labels = model.num_relations_sequence_labels
sent_type_clf_weight = model.sent_type_clf_weight
tags_sequence_clf_weight = model.tags_sequence_clf_weight
relations_sequence_clf_weight = model.relations_sequence_clf_weight
eval_loss = defaultdict(float)
nb_eval_steps = 0
preds = defaultdict(list)
for batch_id, batch in enumerate(tqdm(
eval_dataloader, total=len(eval_dataloader),
desc='validation ... '
)):
if skip_every_n_examples != 1 and (batch_id + 1) % skip_every_n_examples != 1:
continue
batch = tuple([elem.to(device) for elem in batch])
input_ids, input_mask, segment_ids, \
sent_type_labels_ids, tags_sequence_labels_ids, \
relations_sequence_labels_ids, token_valid_pos_ids = batch
with torch.no_grad():
outputs, loss = model(
input_ids=input_ids,
token_type_ids=segment_ids,
attention_mask=input_mask,
sent_type_labels=sent_type_labels_ids,
tags_sequence_labels=tags_sequence_labels_ids,
relations_sequence_labels=relations_sequence_labels_ids,
token_valid_pos_ids=token_valid_pos_ids,
device=device
)
sent_type_logits, tags_sequence_logits, \
relations_sequence_logits = outputs[:3]
if compute_metrics:
eval_loss['sent_type_loss'] += \
loss['sent_type_loss'].mean().item()
eval_loss['tags_sequence_loss'] += \
loss['tags_sequence_loss'].mean().item()
eval_loss['relations_sequence_loss'] += \
loss['relations_sequence_loss'].mean().item()
eval_loss['weighted_loss'] += \
loss['weighted_loss'].mean().item()
nb_eval_steps += 1
preds['sent_type'].append(
sent_type_logits.detach().cpu().numpy()
)
preds['tags_sequence'].append(
tags_sequence_logits.detach().cpu().numpy()
)
preds['relations_sequence'].append(
relations_sequence_logits.detach().cpu().numpy()
)
preds['sent_type'] = np.concatenate(preds['sent_type'], axis=0)
preds['tags_sequence'] = np.concatenate(preds['tags_sequence'], axis=0)
preds['relations_sequence'] = np.concatenate(
preds['relations_sequence'],
axis=0
)
scores = {}
for key in preds:
scores[key] = softmax(preds[key], axis=-1).max(axis=-1)
preds[key] = preds[key].argmax(axis=-1)
if compute_metrics:
for key in eval_loss:
eval_loss[key] = eval_loss[key] / nb_eval_steps
if cur_train_mean_loss is not None:
eval_loss.update(cur_train_mean_loss)
result = compute_all_metrics(
eval_sent_type_labels_ids.numpy(), preds['sent_type'],
np.array([x for y in eval_tags_sequence_labels_ids.numpy() for x in y]),
np.array([x for y in preds['tags_sequence'] for x in y]),
np.array([x for y in eval_relations_sequence_labels_ids.numpy() for x in y]),
np.array([x for y in preds['relations_sequence'] for x in y]),
label2id, loss_info=eval_loss,
logger=logger
)
else:
result = {}
for key in eval_loss:
result[key] = eval_loss[key]
model.train()
return preds, result, scores
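

# Hedged mini-example of the confidence extraction performed in evaluate():
# the per-example score is the max softmax probability and the prediction is
# the argmax over the same logits.
def _confidence_example():
    logits = np.array([[2.0, 0.0], [0.5, 1.5]])
    probs = softmax(logits, axis=-1)
    return probs.max(axis=-1), logits.argmax(axis=-1)  # confidences, class ids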
def main(args):
# only for heatmap
# if args.sent_type_clf_weight < 1 and args.relations_sequence_clf_weight < 1:
# print(f'skipping ... {args.output_dir}: both tasks 1 and 3 weights below 1.0')
# return
if os.path.exists(args.output_dir) and args.do_train:
from glob import glob
tsv_files = glob(os.path.join(args.output_dir, '*best*tsv'))
if tsv_files:
print('already computed: skipping ...')
return
else:
print(f'already existing {args.output_dir}. but without weight file and tsv files ...')
os.system(f'rm -r {args.output_dir}')
assert args.context_mode in ['full', 'center', 'left', 'right']
# if args.output_dirs_to_exclude != '':
# output_dirs_to_exclue = json.load(open(args.output_dirs_to_exclude))
# else:
# output_dirs_to_exclue = []
# if args.output_dir in output_dirs_to_exclue:
# print(f'skipping ... {args.output_dir}: from exclude output dirs')
# return 0
# only for predicting
source_model = os.path.join(
args.output_dir, f'{args.model_prefix}pytorch_model.bin'
)
dest_model = os.path.join(
args.output_dir, 'pytorch_model.bin'
)
rm_model = False
if args.do_eval:
if not os.path.exists(
source_model
):
print(f'returning ... not found {source_model}')
return
if source_model != dest_model:
rm_model = True
dest_tmp_model_path = tempfile.mkdtemp()
os.system(f'cp {source_model} {os.path.join(dest_tmp_model_path, "pytorch_model.bin")}')
os.system(f'cp {os.path.join(args.output_dir, "config.json")} {os.path.join(dest_tmp_model_path, "config.json")}')
os.system(f'cp {os.path.join(args.output_dir, "vocab.txt")} {os.path.join(dest_tmp_model_path, "vocab.txt")}')
else:
dest_tmp_model_path = args.output_dir
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
if args.gradient_accumulation_steps < 1:
raise ValueError(
"gradient_accumulation_steps parameter should be >= 1"
)
args.train_batch_size = \
args.train_batch_size // args.gradient_accumulation_steps
if args.do_train:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError(
"At least one of `do_train` or `do_eval` must be True."
)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
elif args.do_train or args.do_validate:
raise ValueError(args.output_dir, 'output_dir already exists')
suffix = datetime.now().isoformat().replace('-', '_').replace(
':', '_').split('.')[0].replace('T', '-')
if args.do_train:
train_writer = SummaryWriter(
log_dir=os.path.join(
'tensorboard', args.output_dir, 'train'
)
)
dev_writer = SummaryWriter(
log_dir=os.path.join(
'tensorboard', args.output_dir, 'dev'
)
)
logger.addHandler(logging.FileHandler(
os.path.join(args.output_dir, f"train_{suffix}.log"), 'w')
)
eval_logger.addHandler(logging.FileHandler(
os.path.join(args.output_dir, f"scores_{suffix}.log"), 'w')
)
else:
logger.addHandler(logging.FileHandler(
os.path.join(args.output_dir, f"eval_{suffix}.log"), 'w')
)
logger.info(args)
logger.info("device: {}, n_gpu: {}".format(device, n_gpu))
processor = DataProcessor(
filter_task_1=args.filter_task_1,
filter_task_3=args.filter_task_3
)
eval_metrics = {
eval_metric: True for eval_metric in args.eval_metrics.split('+')
}
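    # drop metrics for tasks that are filtered out or have zero loss weight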
if args.filter_task_1 and args.do_train:
assert args.sent_type_clf_weight == 0.0
eval_metrics.pop('sent_type_1_f1-score')
if args.filter_task_3 and args.do_train:
assert args.relations_sequence_clf_weight == 0.0
eval_metrics.pop('relations_sequence_macro-avg_f1-score')
if args.sent_type_clf_weight == 0.0 and \
'sent_type_1_f1-score' in eval_metrics:
eval_metrics.pop('sent_type_1_f1-score')
if args.tags_sequence_clf_weight == 0.0 and \
'tags_sequence_macro-avg_f1-score' in eval_metrics:
eval_metrics.pop('tags_sequence_macro-avg_f1-score')
if args.relations_sequence_clf_weight == 0.0 and \
'relations_sequence_macro-avg_f1-score' in eval_metrics:
eval_metrics.pop('relations_sequence_macro-avg_f1-score')
assert len(eval_metrics) > 0, "inconsistent train params"
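    # with partial (non-full) context only the sentence-type metric is kept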
if args.context_mode != 'full':
keys = list(eval_metrics.keys())
for key in keys:
if key != 'sent_type_1_f1-score':
eval_metrics.pop(key)
assert 'sent_type_1_f1-score' in eval_metrics
sent_type_labels_list = \
processor.get_sent_type_labels(args.data_dir, logger)
tags_sequence_labels_list = \
processor.get_sequence_labels(
args.data_dir, logger=logger, sequence_type='tags_sequence'
)
relations_sequence_labels_list = \
processor.get_sequence_labels(
args.data_dir, logger=logger, sequence_type='relations_sequence'
)
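    # sequence labels are enumerated from 1: id 0 is reserved and later mapped
    # back to the negative labels 'O' / '0' in write_predictions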
label2id = {
'sent_type': {
label: i for i, label in enumerate(sent_type_labels_list)
},
'tags_sequence': {
label: i for i, label in enumerate(tags_sequence_labels_list, 1)
},
'relations_sequence': {
label: i for i, label in enumerate(relations_sequence_labels_list, 1)
}
}
id2label = {
'sent_type': {
i: label for i, label in enumerate(sent_type_labels_list)
},
'tags_sequence': {
i: label for i, label in enumerate(tags_sequence_labels_list, 1)
},
'relations_sequence': {
i: label for i, label in enumerate(relations_sequence_labels_list, 1)
}
}
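    # classifier output sizes; the +1 on both sequence tasks accounts for the reserved id 0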
num_sent_type_labels = len(sent_type_labels_list)
num_tags_sequence_labels = len(tags_sequence_labels_list) + 1
num_relations_sequence_labels = len(relations_sequence_labels_list) + 1
do_lower_case = 'uncased' in args.model
tokenizer = tokenizers[args.model].from_pretrained(
args.model, do_lower_case=do_lower_case
)
model_name = args.model
if args.do_train:
config = configs[args.model]
config = config.from_pretrained(
args.model,
hidden_dropout_prob=args.dropout
)
model = models[model_name].from_pretrained(
args.model, cache_dir=str(PYTORCH_PRETRAINED_BERT_CACHE),
num_sent_type_labels=num_sent_type_labels,
num_tags_sequence_labels=num_tags_sequence_labels,
num_relations_sequence_labels=num_relations_sequence_labels,
sent_type_clf_weight=args.sent_type_clf_weight,
tags_sequence_clf_weight=args.tags_sequence_clf_weight,
relations_sequence_clf_weight=args.relations_sequence_clf_weight,
pooling_type=args.subtokens_pooling_type,
config=config
)
print(
"task weights:",
model.sent_type_clf_weight,
model.tags_sequence_clf_weight,
model.relations_sequence_clf_weight
)
else:
model = models[model_name].from_pretrained(
dest_tmp_model_path,
num_sent_type_labels=num_sent_type_labels,
num_tags_sequence_labels=num_tags_sequence_labels,
num_relations_sequence_labels=num_relations_sequence_labels,
sent_type_clf_weight=args.sent_type_clf_weight,
tags_sequence_clf_weight=args.tags_sequence_clf_weight,
relations_sequence_clf_weight=args.relations_sequence_clf_weight,
pooling_type=args.subtokens_pooling_type
)
model.to(device)
eval_examples = processor.get_dev_examples(args.data_dir)
eval_features, eval_new_examples = model.convert_examples_to_features(
eval_examples, label2id, args.max_seq_length,
tokenizer, logger, args.sequence_mode, context_mode=args.context_mode
)
logger.info("***** Dev *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_dataloader, eval_sent_type_labels_ids, \
eval_tags_sequence_labels_ids, eval_relations_sequence_labels_ids = \
get_dataloader_and_tensors(eval_features, args.eval_batch_size)
if not args.do_eval:
test_file = os.path.join(
args.data_dir, 'test.json'
) if args.test_file == '' else args.test_file
test_examples = processor.get_test_examples(test_file)
test_features, test_new_examples = model.convert_examples_to_features(
test_examples, label2id, args.max_seq_length,
tokenizer, logger, args.sequence_mode, context_mode=args.context_mode
)
logger.info("***** Test *****")
logger.info(" Num examples = %d", len(test_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
test_dataloader, test_sent_type_labels_ids, \
test_tags_sequence_labels_ids, test_relations_sequence_labels_ids = \
get_dataloader_and_tensors(test_features, args.eval_batch_size)
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir)
train_features, _ = model.convert_examples_to_features(
train_examples, label2id,
args.max_seq_length, tokenizer, logger, args.sequence_mode,
context_mode=args.context_mode
)
if args.train_mode == 'sorted' or args.train_mode == 'random_sorted':
train_features = sorted(
train_features, key=lambda f: np.sum(f.input_mask)
)
else:
random.shuffle(train_features)
train_dataloader, sent_type_ids, tags_sequence_ids, \
relations_sequence_ids = \
get_dataloader_and_tensors(train_features, args.train_batch_size)
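        # materialize all batches up front so they can be reshuffled between epochs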
train_batches = [batch for batch in train_dataloader]
num_train_optimization_steps = \
len(train_dataloader) // args.gradient_accumulation_steps * \
args.num_train_epochs
warmup_steps = int(args.warmup_proportion * num_train_optimization_steps)
logger.info("***** Training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
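        # a checkpoint is only saved once a metric beats its threshold;
        # sentence-type f1 starts with a stricter bar (threshold + 0.2)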
best_result = defaultdict(float)
for eval_metric in eval_metrics:
best_result[eval_metric] = args.threshold
if eval_metric.startswith('sent_type'):
best_result[eval_metric] += 0.2
print('best results thresholds:')
print(best_result)
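        # run validation eval_per_epoch times per epoch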
eval_step = max(1, len(train_batches) // args.eval_per_epoch)
lr = float(args.learning_rate)
# if n_gpu > 1:
# model = torch.nn.DataParallel(model)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{
'params': [
param for name, param in param_optimizer
if not any(nd in name for nd in no_decay)
],
'weight_decay': float(args.weight_decay)
},
{
'params': [
param for name, param in param_optimizer
if any(nd in name for nd in no_decay)
],
'weight_decay': 0.0
}
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=lr
)
if args.lr_schedule == 'constant_warmup':
print('lr schedule = constant_warmup')
scheduler = get_constant_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps
)
else:
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_train_optimization_steps
)
start_time = time.time()
global_step = 0
for epoch in range(1, 1 + int(args.num_train_epochs)):
tr_loss = 0
nb_tr_examples = 0
nb_tr_steps = 0
cur_train_loss = defaultdict(float)
model.train()
logger.info("Start epoch #{} (lr = {})...".format(epoch, lr))
if args.train_mode == 'random' or args.train_mode == 'random_sorted':
random.shuffle(train_batches)
for step, batch in enumerate(
tqdm(
train_batches, total=len(train_batches),
desc='training ... '
)
):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, \
sent_type_labels_ids, tags_sequence_labels_ids, \
relations_sequence_labels_ids, token_valid_pos_ids = batch
train_loss = model(
input_ids=input_ids,
token_type_ids=segment_ids,
attention_mask=input_mask,
sent_type_labels=sent_type_labels_ids,
tags_sequence_labels=tags_sequence_labels_ids,
relations_sequence_labels=relations_sequence_labels_ids,
token_valid_pos_ids=token_valid_pos_ids,
return_outputs=False,
device=device
)
for key in train_loss:
cur_train_loss[key] += train_loss[key].mean().item()
loss_to_optimize = train_loss['weighted_loss']
if n_gpu > 1:
loss_to_optimize = loss_to_optimize.mean()
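                # scale the loss so gradients accumulated over several mini-batches
                # match one large batch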
if args.gradient_accumulation_steps > 1:
loss_to_optimize = \
loss_to_optimize / args.gradient_accumulation_steps
loss_to_optimize.backward()
torch.nn.utils.clip_grad_norm_(
model.parameters(),
args.max_grad_norm
)
tr_loss += loss_to_optimize.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
global_step += 1
if args.do_validate and (step + 1) % eval_step == 0:
logger.info(
'Ep: {}, Stp: {}/{}, usd_t={:.2f}s, loss={:.6f}'.format(
epoch, step + 1, len(train_batches),
time.time() - start_time, tr_loss / nb_tr_steps
)
)
predict_for_metrics = []
cur_train_mean_loss = {}
for key in cur_train_loss:
cur_train_mean_loss[f'train_{key}'] = \
cur_train_loss[key] / nb_tr_steps
preds, result, scores = evaluate(
model, device, eval_dataloader,
eval_sent_type_labels_ids,
eval_tags_sequence_labels_ids,
eval_relations_sequence_labels_ids,
label2id, cur_train_mean_loss=cur_train_mean_loss,
logger=eval_logger
)
result['global_step'] = global_step
result['epoch'] = epoch
result['learning_rate'] = lr
result['batch_size'] = \
args.train_batch_size * args.gradient_accumulation_steps
for key, value in result.items():
dev_writer.add_scalar(key, value, global_step)
                    for key, value in cur_train_mean_loss.items():
train_writer.add_scalar(
f'running_train_{key}', value, global_step
)
logger.info("First 20 predictions:")
for sent_type_pred, sent_type_label in zip(
preds['sent_type'][:20],
eval_sent_type_labels_ids.numpy()[:20]
):
sign = u'\u2713' \
if sent_type_pred == sent_type_label else u'\u2718'
logger.info(
"pred = %s, label = %s %s" % (
id2label['sent_type'][sent_type_pred],
id2label['sent_type'][sent_type_label],
sign
)
)
for eval_metric in eval_metrics:
if result[eval_metric] > best_result[eval_metric]:
best_result[eval_metric] = result[eval_metric]
logger.info("!!! Best dev %s (lr=%s, epoch=%d): %.2f" %
(
eval_metric,
str(lr), epoch,
result[eval_metric] * 100.0
)
)
predict_for_metrics.append(eval_metric)
for metric_id, eval_metric in tqdm(
enumerate(predict_for_metrics), total=len(predict_for_metrics),
desc='writing predictions ... '
):
dest_file = f'dev_best_{eval_metric}'
write_predictions(
args, eval_new_examples, eval_features, preds,
scores, dest_file,
label2id=label2id, id2label=id2label,
metrics=result
)
if metric_id == 0:
test_preds, test_result, test_scores = evaluate(
model, device, test_dataloader,
test_sent_type_labels_ids,
test_tags_sequence_labels_ids,
test_relations_sequence_labels_ids,
label2id, cur_train_mean_loss=None,
logger=None
)
output_model_file = os.path.join(
args.output_dir,
f"best_{eval_metric}_{WEIGHTS_NAME}"
)
save_model(
args, model, tokenizer, output_model_file
)
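                            # the other best-metric checkpoints at this step share
                            # the same weights, so copy the saved file instead of
                            # serializing the model again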
for metric in predict_for_metrics[1:]:
dest_model_path = os.path.join(
args.output_dir,
f"best_{metric}_{WEIGHTS_NAME}"
)
os.system(
f'cp {output_model_file} {dest_model_path}'
)
dest_file = f'test_best_{eval_metric}'
write_predictions(
args, test_new_examples, test_features, test_preds,
test_scores, dest_file,
label2id=label2id, id2label=id2label,
metrics=test_result
)
if args.log_train_metrics:
preds, result, scores = evaluate(
model, device, train_dataloader,
sent_type_ids,
tags_sequence_ids,
relations_sequence_ids,
label2id, logger=logger,
skip_every_n_examples=args.skip_every_n_examples
)
result['global_step'] = global_step
result['epoch'] = epoch
result['learning_rate'] = lr
result['batch_size'] = \
args.train_batch_size * args.gradient_accumulation_steps
for key, value in result.items():
train_writer.add_scalar(key, value, global_step)
if args.do_eval:
test_files = os.path.join(
args.data_dir, 'test.json'
) if args.test_file == '' else args.test_file
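        # NOTE: multiple test files are split on the literal character "8";
        # this is an unusual separator choice and may be a leftover/typo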
for test_file in test_files.split("8"):
test_examples = processor.get_test_examples(test_file)
test_features, test_new_examples = model.convert_examples_to_features(
test_examples, label2id, args.max_seq_length,
tokenizer, logger, args.sequence_mode, context_mode=args.context_mode
)
logger.info("***** Test *****")
logger.info(" Num examples = %d", len(test_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
test_dataloader, test_sent_type_labels_ids, \
test_tags_sequence_labels_ids, test_relations_sequence_labels_ids = \
get_dataloader_and_tensors(test_features, args.eval_batch_size)
preds, result, scores = evaluate(
model, device, test_dataloader,
test_sent_type_labels_ids,
test_tags_sequence_labels_ids,
test_relations_sequence_labels_ids,
label2id,
compute_metrics=False
)
dest_file = args.model_prefix + test_file.split('/')[-1].replace('.json', '')
write_predictions(
args, test_new_examples, test_features,
preds, scores, dest_file,
label2id=label2id, id2label=id2label, metrics=result
)
if rm_model:
shutil.rmtree(dest_tmp_model_path)


def save_model(args, model, tokenizer, output_model_file):
start = time.time()
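    # unwrap DataParallel (if used) so the raw module's weights are serialized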
model_to_save = \
model.module if hasattr(model, 'module') else model
output_config_file = os.path.join(
args.output_dir, CONFIG_NAME
)
torch.save(
model_to_save.state_dict(), output_model_file
)
model_to_save.config.to_json_file(
output_config_file
)
tokenizer.save_vocabulary(args.output_dir)
print(f'model saved in {time.time() - start} seconds to {output_model_file}')


def write_predictions(
args, examples, features, preds,
scores, dest_file, label2id, id2label, metrics=None
):
aggregated_results = {}
orig_positions_map = [ex.orig_positions_map for ex in features]
neg_label_mapper = {
'tags_sequence': 'O',
'relations_sequence': '0'
}
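    # map predictions back to the original token positions; positions lost to
    # subtoken truncation get the negative label and a dummy score of 0.999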
for task in ['tags_sequence', 'relations_sequence']:
aggregated_results[task] = [
list(pred[orig_positions]) + \
[
label2id[task][neg_label_mapper[task]]
] * (len(ex.tokens) - len(orig_positions))
for pred, orig_positions, ex in zip(
preds[task],
orig_positions_map,
examples
)
]
aggregated_results[f'{task}_scores'] = [
list(score[orig_positions]) + \
[0.999] * (len(ex.tokens) - len(orig_positions))
for score, orig_positions, ex in zip(
scores[task],
orig_positions_map,
examples
)
]
prediction_results = {
'idx': [
ex.guid for ex in examples
],
'tokens': [
' '.join(ex.tokens) for ex in examples
],
'sent_type_label': [
ex.sent_type for ex in examples
],
'sent_type_pred': [
id2label['sent_type'][x] for x in preds['sent_type']
],
'sent_type_scores': [
str(score) for score in scores['sent_type']
],
'sent_start': [
ex.sent_start for ex in examples
],
'sent_end': [
ex.sent_end for ex in examples
],
'tags_sequence_labels': [
' '.join(ex.tags_sequence) for ex in examples
],
'tags_sequence_pred': [
' '.join([id2label['tags_sequence'][x] if x != 0 else 'O' for x in sent])
for sent in aggregated_results['tags_sequence']
],
'tags_sequence_scores': [
' '.join([str(score) for score in sent])
for sent in aggregated_results['tags_sequence_scores']
],
'tags_ids': [
' '.join(ex.tags_ids) for ex in examples
],
'relations_sequence_labels': [
' '.join(ex.relations_sequence) for ex in examples
],
'relations_sequence_pred': [
' '.join([id2label['relations_sequence'][x] if x != 0 else '0' for x in sent])
for sent in aggregated_results['relations_sequence']
],
'relations_sequence_scores': [
' '.join([str(score) for score in sent])
for sent in aggregated_results['relations_sequence_scores']
],
'subj_start': [
ex.subj_start for ex in examples
],
'subj_end': [
ex.subj_end for ex in examples
],
'infile_offsets': [
' '.join([
str(offset) for offset in ex.infile_offsets
]) for ex in examples
],
'start_char': [
' '.join(ex.start_char) for ex in examples
],
'end_char': [
' '.join(ex.end_char) for ex in examples
],
'source': [
ex.source for ex in examples
]
}
prediction_results = pd.DataFrame(prediction_results)
prediction_results.to_csv(
os.path.join(
args.output_dir,
f"{dest_file}.tsv"),
sep='\t', index=False
)
if metrics is not None:
with open(
os.path.join(
args.output_dir,
f"{dest_file}_eval_results.txt"
), "w"
) as f:
for key in sorted(metrics.keys()):
f.write("%s = %s\n" % (key, str(metrics[key])))


if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--test_file", default='', type=str, required=False)
parser.add_argument("--model", default='bert-large-uncased', type=str, required=True)
parser.add_argument("--data_dir", default='data', type=str, required=True,
help="The input data dir. Should contain the .json files for the task.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--eval_per_epoch", default=4, type=int,
help="How many times to do validation on dev set per epoch")
parser.add_argument("--max_seq_length", default=256, type=int,
help="The maximum total input sequence length after WordPiece tokenization.\n"
"Sequences longer than this will be truncated, and sequences shorter\n"
"than this will be padded.")
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--train_mode", type=str, default='random_sorted',
choices=['random', 'sorted', 'random_sorted'])
parser.add_argument("--do_validate", action='store_true', help="Whether to run validation on dev set.")
parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the test set.")
parser.add_argument("--train_batch_size", default=32, type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=8, type=int,
help="Total batch size for eval.")
parser.add_argument(
"--eval_metrics",
default="+".join([
"sent_type_1_f1-score",
"tags_sequence_macro-avg_f1-score",
"relations_sequence_macro-avg_f1-score"
]),
type=str
)
parser.add_argument("--learning_rate", default=1e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs", default=6.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion", default=0.1, type=float,
help="Proportion of training to perform linear learning rate warmup.\n"
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="maximal gradient norm")
parser.add_argument("--sent_type_clf_weight", default=1.0, type=float,
help="the weight of task 1")
parser.add_argument("--tags_sequence_clf_weight", default=1.0, type=float,
help="The weight of task 2")
parser.add_argument("--relations_sequence_clf_weight", default=1.0, type=float,
help="The weight of task 3")
parser.add_argument("--weight_decay", default=0.1, type=float,
help="weight_decay coefficient for regularization")
parser.add_argument("--dropout", default=0.1, type=float,
help="dropout rate")
parser.add_argument("--no_cuda", action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=8,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--filter_task_3", action="store_true",
help="exclude task 3 from training")
parser.add_argument("--filter_task_1", action="store_true",
help="exclude task 1 from training")
parser.add_argument("--subtokens_pooling_type", type=str, default="first",
help="pooling mode in bert-ner, one of avg or first")
parser.add_argument("--sequence_mode", type=str, default="not-all",
help="train to predict for all subtokens or not"
"all or not-all")
parser.add_argument("--context_mode", type=str, default="full",
help="context for task 1: one from center, full, left, right")
parser.add_argument("--lr_schedule", type=str, default="linear_warmup",
help="lr adjustment schedule")
parser.add_argument("--log_train_metrics", action="store_true",
help="compute metrics for train set too")
parser.add_argument("--threshold", type=float, default=0.30,
help="threshold for best models to save")
parser.add_argument("--output_dirs_to_exclude", type=str, default='',
help="path to json file containing list of output" + \
" dirs to exclude fome trainig")
parser.add_argument("--skip_every_n_examples", type=int, default=30,
help="number examples in train set to skip in evaluating metrics")
parser.add_argument("--model_prefix", type=str, default='best_sent_type_1_f1-score_',
help="pefix of the model weight")
parsed_args = parser.parse_args()
main(parsed_args)
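
# Example invocation (illustrative sketch; the script name and data layout are
# assumptions, not taken from this file):
#
#   python train_multitask.py \
#       --model bert-base-uncased \
#       --data_dir data \
#       --output_dir runs/multitask_baseline \
#       --do_train --do_validate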
'sent_type_1_f1-score':\n eval_metrics.pop(key)\n assert 'sent_type_1_f1-score' in eval_metrics\n sent_type_labels_list = processor.get_sent_type_labels(args.data_dir,\n logger)\n tags_sequence_labels_list = processor.get_sequence_labels(args.data_dir,\n logger=logger, sequence_type='tags_sequence')\n relations_sequence_labels_list = processor.get_sequence_labels(args.\n data_dir, logger=logger, sequence_type='relations_sequence')\n label2id = {'sent_type': {label: i for i, label in enumerate(\n sent_type_labels_list)}, 'tags_sequence': {label: i for i, label in\n enumerate(tags_sequence_labels_list, 1)}, 'relations_sequence': {\n label: i for i, label in enumerate(relations_sequence_labels_list, 1)}}\n id2label = {'sent_type': {i: label for i, label in enumerate(\n sent_type_labels_list)}, 'tags_sequence': {i: label for i, label in\n enumerate(tags_sequence_labels_list, 1)}, 'relations_sequence': {i:\n label for i, label in enumerate(relations_sequence_labels_list, 1)}}\n num_sent_type_labels = len(sent_type_labels_list)\n num_tags_sequence_labels = len(tags_sequence_labels_list) + 1\n num_relations_sequence_labels = len(relations_sequence_labels_list) + 1\n do_lower_case = 'uncased' in args.model\n tokenizer = tokenizers[args.model].from_pretrained(args.model,\n do_lower_case=do_lower_case)\n model_name = args.model\n if args.do_train:\n config = configs[args.model]\n config = config.from_pretrained(args.model, hidden_dropout_prob=\n args.dropout)\n model = models[model_name].from_pretrained(args.model, cache_dir=\n str(PYTORCH_PRETRAINED_BERT_CACHE), num_sent_type_labels=\n num_sent_type_labels, num_tags_sequence_labels=\n num_tags_sequence_labels, num_relations_sequence_labels=\n num_relations_sequence_labels, sent_type_clf_weight=args.\n sent_type_clf_weight, tags_sequence_clf_weight=args.\n tags_sequence_clf_weight, relations_sequence_clf_weight=args.\n relations_sequence_clf_weight, pooling_type=args.\n subtokens_pooling_type, config=config)\n print('task weights:', model.sent_type_clf_weight, model.\n tags_sequence_clf_weight, model.relations_sequence_clf_weight)\n else:\n model = models[model_name].from_pretrained(dest_tmp_model_path,\n num_sent_type_labels=num_sent_type_labels,\n num_tags_sequence_labels=num_tags_sequence_labels,\n num_relations_sequence_labels=num_relations_sequence_labels,\n sent_type_clf_weight=args.sent_type_clf_weight,\n tags_sequence_clf_weight=args.tags_sequence_clf_weight,\n relations_sequence_clf_weight=args.\n relations_sequence_clf_weight, pooling_type=args.\n subtokens_pooling_type)\n model.to(device)\n eval_examples = processor.get_dev_examples(args.data_dir)\n eval_features, eval_new_examples = model.convert_examples_to_features(\n eval_examples, label2id, args.max_seq_length, tokenizer, logger,\n args.sequence_mode, context_mode=args.context_mode)\n logger.info('***** Dev *****')\n logger.info(' Num examples = %d', len(eval_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids, eval_relations_sequence_labels_ids\n ) = get_dataloader_and_tensors(eval_features, args.eval_batch_size)\n if not args.do_eval:\n test_file = os.path.join(args.data_dir, 'test.json'\n ) if args.test_file == '' else args.test_file\n test_examples = processor.get_test_examples(test_file)\n test_features, test_new_examples = model.convert_examples_to_features(\n test_examples, label2id, args.max_seq_length, tokenizer, logger,\n args.sequence_mode, context_mode=args.context_mode)\n 
logger.info('***** Test *****')\n logger.info(' Num examples = %d', len(test_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (test_dataloader, test_sent_type_labels_ids,\n test_tags_sequence_labels_ids, test_relations_sequence_labels_ids\n ) = (get_dataloader_and_tensors(test_features, args.\n eval_batch_size))\n if args.do_train:\n train_examples = processor.get_train_examples(args.data_dir)\n train_features, _ = model.convert_examples_to_features(train_examples,\n label2id, args.max_seq_length, tokenizer, logger, args.\n sequence_mode, context_mode=args.context_mode)\n if args.train_mode == 'sorted' or args.train_mode == 'random_sorted':\n train_features = sorted(train_features, key=lambda f: np.sum(f.\n input_mask))\n else:\n random.shuffle(train_features)\n (train_dataloader, sent_type_ids, tags_sequence_ids,\n relations_sequence_ids) = (get_dataloader_and_tensors(\n train_features, args.train_batch_size))\n train_batches = [batch for batch in train_dataloader]\n num_train_optimization_steps = len(train_dataloader\n ) // args.gradient_accumulation_steps * args.num_train_epochs\n warmup_steps = int(args.warmup_proportion *\n num_train_optimization_steps)\n logger.info('***** Training *****')\n logger.info(' Num examples = %d', len(train_examples))\n logger.info(' Batch size = %d', args.train_batch_size)\n logger.info(' Num steps = %d', num_train_optimization_steps)\n best_result = defaultdict(float)\n for eval_metric in eval_metrics:\n best_result[eval_metric] = args.threshold\n if eval_metric.startswith('sent_type'):\n best_result[eval_metric] += 0.2\n print('best results thresholds:')\n print(best_result)\n eval_step = max(1, len(train_batches) // args.eval_per_epoch)\n lr = float(args.learning_rate)\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [{'params': [param for name, param in\n param_optimizer if not any(nd in name for nd in no_decay)],\n 'weight_decay': float(args.weight_decay)}, {'params': [param for\n name, param in param_optimizer if any(nd in name for nd in\n no_decay)], 'weight_decay': 0.0}]\n optimizer = AdamW(optimizer_grouped_parameters, lr=lr)\n if args.lr_schedule == 'constant_warmup':\n print('lr schedule = constant_warmup')\n scheduler = get_constant_schedule_with_warmup(optimizer,\n num_warmup_steps=warmup_steps)\n else:\n scheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=warmup_steps, num_training_steps=\n num_train_optimization_steps)\n start_time = time.time()\n global_step = 0\n for epoch in range(1, 1 + int(args.num_train_epochs)):\n tr_loss = 0\n nb_tr_examples = 0\n nb_tr_steps = 0\n cur_train_loss = defaultdict(float)\n model.train()\n logger.info('Start epoch #{} (lr = {})...'.format(epoch, lr))\n if (args.train_mode == 'random' or args.train_mode ==\n 'random_sorted'):\n random.shuffle(train_batches)\n for step, batch in enumerate(tqdm(train_batches, total=len(\n train_batches), desc='training ... 
')):\n batch = tuple(t.to(device) for t in batch)\n (input_ids, input_mask, segment_ids, sent_type_labels_ids,\n tags_sequence_labels_ids, relations_sequence_labels_ids,\n token_valid_pos_ids) = batch\n train_loss = model(input_ids=input_ids, token_type_ids=\n segment_ids, attention_mask=input_mask,\n sent_type_labels=sent_type_labels_ids,\n tags_sequence_labels=tags_sequence_labels_ids,\n relations_sequence_labels=relations_sequence_labels_ids,\n token_valid_pos_ids=token_valid_pos_ids, return_outputs\n =False, device=device)\n for key in train_loss:\n cur_train_loss[key] += train_loss[key].mean().item()\n loss_to_optimize = train_loss['weighted_loss']\n if n_gpu > 1:\n loss_to_optimize = loss_to_optimize.mean()\n if args.gradient_accumulation_steps > 1:\n loss_to_optimize = (loss_to_optimize / args.\n gradient_accumulation_steps)\n loss_to_optimize.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.\n max_grad_norm)\n tr_loss += loss_to_optimize.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n global_step += 1\n if args.do_validate and (step + 1) % eval_step == 0:\n logger.info(\n 'Ep: {}, Stp: {}/{}, usd_t={:.2f}s, loss={:.6f}'.\n format(epoch, step + 1, len(train_batches), time.\n time() - start_time, tr_loss / nb_tr_steps))\n predict_for_metrics = []\n cur_train_mean_loss = {}\n for key in cur_train_loss:\n cur_train_mean_loss[f'train_{key}'] = cur_train_loss[\n key] / nb_tr_steps\n preds, result, scores = evaluate(model, device,\n eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids,\n eval_relations_sequence_labels_ids, label2id,\n cur_train_mean_loss=cur_train_mean_loss, logger=\n eval_logger)\n result['global_step'] = global_step\n result['epoch'] = epoch\n result['learning_rate'] = lr\n result['batch_size'] = (args.train_batch_size * args.\n gradient_accumulation_steps)\n for key, value in result.items():\n dev_writer.add_scalar(key, value, global_step)\n for key, value in cur_train_mean_loss.items():\n train_writer.add_scalar(f'running_train_{key}',\n value, global_step)\n logger.info('First 20 predictions:')\n for sent_type_pred, sent_type_label in zip(preds[\n 'sent_type'][:20], eval_sent_type_labels_ids.numpy(\n )[:20]):\n sign = (u'✓' if sent_type_pred == sent_type_label else\n u'✘')\n logger.info('pred = %s, label = %s %s' % (id2label[\n 'sent_type'][sent_type_pred], id2label[\n 'sent_type'][sent_type_label], sign))\n for eval_metric in eval_metrics:\n if result[eval_metric] > best_result[eval_metric]:\n best_result[eval_metric] = result[eval_metric]\n logger.info(\n '!!! Best dev %s (lr=%s, epoch=%d): %.2f' %\n (eval_metric, str(lr), epoch, result[\n eval_metric] * 100.0))\n predict_for_metrics.append(eval_metric)\n for metric_id, eval_metric in tqdm(enumerate(\n predict_for_metrics), total=len(predict_for_metrics\n ), desc='writing predictions ... 
'):\n dest_file = f'dev_best_{eval_metric}'\n write_predictions(args, eval_new_examples,\n eval_features, preds, scores, dest_file,\n label2id=label2id, id2label=id2label, metrics=\n result)\n if metric_id == 0:\n test_preds, test_result, test_scores = evaluate(\n model, device, test_dataloader,\n test_sent_type_labels_ids,\n test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids,\n label2id, cur_train_mean_loss=None, logger=None\n )\n output_model_file = os.path.join(args.\n output_dir,\n f'best_{eval_metric}_{WEIGHTS_NAME}')\n save_model(args, model, tokenizer,\n output_model_file)\n for metric in predict_for_metrics[1:]:\n dest_model_path = os.path.join(args.\n output_dir, f'best_{metric}_{WEIGHTS_NAME}'\n )\n os.system(\n f'cp {output_model_file} {dest_model_path}'\n )\n dest_file = f'test_best_{eval_metric}'\n write_predictions(args, test_new_examples,\n test_features, test_preds, test_scores,\n dest_file, label2id=label2id, id2label=id2label,\n metrics=test_result)\n if args.log_train_metrics:\n preds, result, scores = evaluate(model, device,\n train_dataloader, sent_type_ids, tags_sequence_ids,\n relations_sequence_ids, label2id, logger=logger,\n skip_every_n_examples=args.skip_every_n_examples)\n result['global_step'] = global_step\n result['epoch'] = epoch\n result['learning_rate'] = lr\n result['batch_size'\n ] = args.train_batch_size * args.gradient_accumulation_steps\n for key, value in result.items():\n train_writer.add_scalar(key, value, global_step)\n if args.do_eval:\n test_files = os.path.join(args.data_dir, 'test.json'\n ) if args.test_file == '' else args.test_file\n for test_file in test_files.split('8'):\n test_examples = processor.get_test_examples(test_file)\n test_features, test_new_examples = (model.\n convert_examples_to_features(test_examples, label2id, args.\n max_seq_length, tokenizer, logger, args.sequence_mode,\n context_mode=args.context_mode))\n logger.info('***** Test *****')\n logger.info(' Num examples = %d', len(test_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (test_dataloader, test_sent_type_labels_ids,\n test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids) = (\n get_dataloader_and_tensors(test_features, args.eval_batch_size)\n )\n preds, result, scores = evaluate(model, device, test_dataloader,\n test_sent_type_labels_ids, test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids, label2id,\n compute_metrics=False)\n dest_file = args.model_prefix + test_file.split('/')[-1].replace(\n '.json', '')\n write_predictions(args, test_new_examples, test_features, preds,\n scores, dest_file, label2id=label2id, id2label=id2label,\n metrics=result)\n if rm_model:\n shutil.rmtree(dest_tmp_model_path)\n\n\ndef save_model(args, model, tokenizer, output_model_file):\n start = time.time()\n model_to_save = model.module if hasattr(model, 'module') else model\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n print(\n f'model saved in {time.time() - start} seconds to {output_model_file}')\n\n\ndef write_predictions(args, examples, features, preds, scores, dest_file,\n label2id, id2label, metrics=None):\n aggregated_results = {}\n orig_positions_map = [ex.orig_positions_map for ex in features]\n neg_label_mapper = {'tags_sequence': 'O', 'relations_sequence': '0'}\n for task in ['tags_sequence', 'relations_sequence']:\n 
aggregated_results[task] = [(list(pred[orig_positions]) + [label2id\n [task][neg_label_mapper[task]]] * (len(ex.tokens) - len(\n orig_positions))) for pred, orig_positions, ex in zip(preds[\n task], orig_positions_map, examples)]\n aggregated_results[f'{task}_scores'] = [(list(score[orig_positions]\n ) + [0.999] * (len(ex.tokens) - len(orig_positions))) for score,\n orig_positions, ex in zip(scores[task], orig_positions_map,\n examples)]\n prediction_results = {'idx': [ex.guid for ex in examples], 'tokens': [\n ' '.join(ex.tokens) for ex in examples], 'sent_type_label': [ex.\n sent_type for ex in examples], 'sent_type_pred': [id2label[\n 'sent_type'][x] for x in preds['sent_type']], 'sent_type_scores': [\n str(score) for score in scores['sent_type']], 'sent_start': [ex.\n sent_start for ex in examples], 'sent_end': [ex.sent_end for ex in\n examples], 'tags_sequence_labels': [' '.join(ex.tags_sequence) for\n ex in examples], 'tags_sequence_pred': [' '.join([(id2label[\n 'tags_sequence'][x] if x != 0 else 'O') for x in sent]) for sent in\n aggregated_results['tags_sequence']], 'tags_sequence_scores': [' '.\n join([str(score) for score in sent]) for sent in aggregated_results\n ['tags_sequence_scores']], 'tags_ids': [' '.join(ex.tags_ids) for\n ex in examples], 'relations_sequence_labels': [' '.join(ex.\n relations_sequence) for ex in examples], 'relations_sequence_pred':\n [' '.join([(id2label['relations_sequence'][x] if x != 0 else '0') for\n x in sent]) for sent in aggregated_results['relations_sequence']],\n 'relations_sequence_scores': [' '.join([str(score) for score in\n sent]) for sent in aggregated_results['relations_sequence_scores']],\n 'subj_start': [ex.subj_start for ex in examples], 'subj_end': [ex.\n subj_end for ex in examples], 'infile_offsets': [' '.join([str(\n offset) for offset in ex.infile_offsets]) for ex in examples],\n 'start_char': [' '.join(ex.start_char) for ex in examples],\n 'end_char': [' '.join(ex.end_char) for ex in examples], 'source': [\n ex.source for ex in examples]}\n prediction_results = pd.DataFrame(prediction_results)\n prediction_results.to_csv(os.path.join(args.output_dir,\n f'{dest_file}.tsv'), sep='\\t', index=False)\n if metrics is not None:\n with open(os.path.join(args.output_dir,\n f'{dest_file}_eval_results.txt'), 'w') as f:\n for key in sorted(metrics.keys()):\n f.write('%s = %s\\n' % (key, str(metrics[key])))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--test_file', default='', type=str, required=False)\n parser.add_argument('--model', default='bert-large-uncased', type=str,\n required=True)\n parser.add_argument('--data_dir', default='data', type=str, required=\n True, help=\n 'The input data dir. 
Should contain the .json files for the task.')\n parser.add_argument('--output_dir', default=None, type=str, required=\n True, help=\n 'The output directory where the model predictions and checkpoints will be written.'\n )\n parser.add_argument('--eval_per_epoch', default=4, type=int, help=\n 'How many times to do validation on dev set per epoch')\n parser.add_argument('--max_seq_length', default=256, type=int, help=\n \"\"\"The maximum total input sequence length after WordPiece tokenization.\nSequences longer than this will be truncated, and sequences shorter\nthan this will be padded.\"\"\"\n )\n parser.add_argument('--do_train', action='store_true', help=\n 'Whether to run training.')\n parser.add_argument('--train_mode', type=str, default='random_sorted',\n choices=['random', 'sorted', 'random_sorted'])\n parser.add_argument('--do_validate', action='store_true', help=\n 'Whether to run validation on dev set.')\n parser.add_argument('--do_eval', action='store_true', help=\n 'Whether to run eval on the test set.')\n parser.add_argument('--train_batch_size', default=32, type=int, help=\n 'Total batch size for training.')\n parser.add_argument('--eval_batch_size', default=8, type=int, help=\n 'Total batch size for eval.')\n parser.add_argument('--eval_metrics', default='+'.join([\n 'sent_type_1_f1-score', 'tags_sequence_macro-avg_f1-score',\n 'relations_sequence_macro-avg_f1-score']), type=str)\n parser.add_argument('--learning_rate', default=1e-05, type=float, help=\n 'The initial learning rate for Adam.')\n parser.add_argument('--num_train_epochs', default=6.0, type=float, help\n ='Total number of training epochs to perform.')\n parser.add_argument('--warmup_proportion', default=0.1, type=float,\n help=\n \"\"\"Proportion of training to perform linear learning rate warmup.\nE.g., 0.1 = 10%% of training.\"\"\"\n )\n parser.add_argument('--max_grad_norm', default=1.0, type=float, help=\n 'maximal gradient norm')\n parser.add_argument('--sent_type_clf_weight', default=1.0, type=float,\n help='the weight of task 1')\n parser.add_argument('--tags_sequence_clf_weight', default=1.0, type=\n float, help='The weight of task 2')\n parser.add_argument('--relations_sequence_clf_weight', default=1.0,\n type=float, help='The weight of task 3')\n parser.add_argument('--weight_decay', default=0.1, type=float, help=\n 'weight_decay coefficient for regularization')\n parser.add_argument('--dropout', default=0.1, type=float, help=\n 'dropout rate')\n parser.add_argument('--no_cuda', action='store_true', help=\n 'Whether not to use CUDA when available')\n parser.add_argument('--seed', type=int, default=42, help=\n 'random seed for initialization')\n parser.add_argument('--gradient_accumulation_steps', type=int, default=\n 8, help=\n 'Number of updates steps to accumulate before performing a backward/update pass.'\n )\n parser.add_argument('--filter_task_3', action='store_true', help=\n 'exclude task 3 from training')\n parser.add_argument('--filter_task_1', action='store_true', help=\n 'exclude task 1 from training')\n parser.add_argument('--subtokens_pooling_type', type=str, default=\n 'first', help='pooling mode in bert-ner, one of avg or first')\n parser.add_argument('--sequence_mode', type=str, default='not-all',\n help='train to predict for all subtokens or notall or not-all')\n parser.add_argument('--context_mode', type=str, default='full', help=\n 'context for task 1: one from center, full, left, right')\n parser.add_argument('--lr_schedule', type=str, default='linear_warmup',\n help='lr adjustment 
schedule')\n    parser.add_argument('--log_train_metrics', action='store_true', help=\n        'compute metrics for train set too')\n    parser.add_argument('--threshold', type=float, default=0.3, help=\n        'threshold for best models to save')\n    parser.add_argument('--output_dirs_to_exclude', type=str, default='',\n        help='path to json file containing list of output' +\n        ' dirs to exclude from training')\n    parser.add_argument('--skip_every_n_examples', type=int, default=30,\n        help='number of examples in the train set to skip when evaluating metrics')\n    parser.add_argument('--model_prefix', type=str, default=\n        'best_sent_type_1_f1-score_', help='prefix of the model weights')\n    parsed_args = parser.parse_args()\n    main(parsed_args)\n",
"<import token>\nlogging.basicConfig(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\nlogger = logging.getLogger(__name__)\neval_logger = logging.getLogger('__scores__')\n\n\ndef compute_all_metrics(sent_type_labels, sent_type_preds,\n tags_sequence_labels, tags_sequence_preds, relations_sequence_labels,\n relations_sequence_preds, label2id, loss_info=None, logger=None):\n eval_tags_sequence_labels = [label2id['tags_sequence'][lab] for lab in\n EVAL_TAGS]\n eval_relations_sequence_labels = [label2id['relations_sequence'][lab] for\n lab in EVAL_RELATIONS]\n task_1_report = classification_report(sent_type_labels, sent_type_preds,\n labels=[0, 1], output_dict=True)\n task_2_report = classification_report(tags_sequence_labels,\n tags_sequence_preds, labels=eval_tags_sequence_labels, output_dict=True\n )\n task_3_report = classification_report(relations_sequence_labels,\n relations_sequence_preds, labels=eval_relations_sequence_labels,\n output_dict=True)\n result = {}\n for x in ['0', '1', 'weighted avg', 'macro avg']:\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f\"sent_type_{x.replace(' ', '-')}_{metrics}\"] = round(\n task_1_report[x][metrics], 6)\n id2label = {val: key for key, val in label2id['tags_sequence'].items()}\n id2label['weighted avg'] = 'weighted-avg'\n id2label['macro avg'] = 'macro-avg'\n for x in (eval_tags_sequence_labels + ['weighted avg', 'macro avg']):\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f'tags_sequence_{id2label[x]}_{metrics}'] = round(\n task_2_report[str(x)][metrics], 6)\n id2label = {val: key for key, val in label2id['relations_sequence'].items()\n }\n id2label['weighted avg'] = 'weighted-avg'\n id2label['macro avg'] = 'macro-avg'\n for x in (eval_relations_sequence_labels + ['weighted avg', 'macro avg']):\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f'relations_sequence_{id2label[x]}_{metrics}'] = round(\n task_3_report[str(x)][metrics], 6)\n if logger is not None:\n logger.info('=====================================')\n for key in sorted(result.keys()):\n logger.info(' %s = %s', key, str(result[key]))\n if loss_info is not None:\n for key in sorted(loss_info.keys()):\n logger.info(' %s = %s', key, str(loss_info[key]))\n return result\n\n\ndef evaluate(model, device, eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids, eval_relations_sequence_labels_ids,\n label2id, compute_metrics=True, verbose=False, cur_train_mean_loss=None,\n logger=None, skip_every_n_examples=1):\n model.eval()\n num_sent_type_labels = model.num_sent_type_labels\n num_tags_sequence_labels = model.num_tags_sequence_labels\n num_relations_sequence_labels = model.num_relations_sequence_labels\n sent_type_clf_weight = model.sent_type_clf_weight\n tags_sequence_clf_weight = model.tags_sequence_clf_weight\n relations_sequence_clf_weight = model.relations_sequence_clf_weight\n eval_loss = defaultdict(float)\n nb_eval_steps = 0\n preds = defaultdict(list)\n for batch_id, batch in enumerate(tqdm(eval_dataloader, total=len(\n eval_dataloader), desc='validation ... 
')):\n if skip_every_n_examples != 1 and (batch_id + 1\n ) % skip_every_n_examples != 1:\n continue\n batch = tuple([elem.to(device) for elem in batch])\n (input_ids, input_mask, segment_ids, sent_type_labels_ids,\n tags_sequence_labels_ids, relations_sequence_labels_ids,\n token_valid_pos_ids) = batch\n with torch.no_grad():\n outputs, loss = model(input_ids=input_ids, token_type_ids=\n segment_ids, attention_mask=input_mask, sent_type_labels=\n sent_type_labels_ids, tags_sequence_labels=\n tags_sequence_labels_ids, relations_sequence_labels=\n relations_sequence_labels_ids, token_valid_pos_ids=\n token_valid_pos_ids, device=device)\n (sent_type_logits, tags_sequence_logits, relations_sequence_logits\n ) = outputs[:3]\n if compute_metrics:\n eval_loss['sent_type_loss'] += loss['sent_type_loss'].mean().item()\n eval_loss['tags_sequence_loss'] += loss['tags_sequence_loss'].mean(\n ).item()\n eval_loss['relations_sequence_loss'] += loss[\n 'relations_sequence_loss'].mean().item()\n eval_loss['weighted_loss'] += loss['weighted_loss'].mean().item()\n nb_eval_steps += 1\n preds['sent_type'].append(sent_type_logits.detach().cpu().numpy())\n preds['tags_sequence'].append(tags_sequence_logits.detach().cpu().\n numpy())\n preds['relations_sequence'].append(relations_sequence_logits.detach\n ().cpu().numpy())\n preds['sent_type'] = np.concatenate(preds['sent_type'], axis=0)\n preds['tags_sequence'] = np.concatenate(preds['tags_sequence'], axis=0)\n preds['relations_sequence'] = np.concatenate(preds['relations_sequence'\n ], axis=0)\n scores = {}\n for key in preds:\n scores[key] = softmax(preds[key], axis=-1).max(axis=-1)\n preds[key] = preds[key].argmax(axis=-1)\n if compute_metrics:\n for key in eval_loss:\n eval_loss[key] = eval_loss[key] / nb_eval_steps\n if cur_train_mean_loss is not None:\n eval_loss.update(cur_train_mean_loss)\n result = compute_all_metrics(eval_sent_type_labels_ids.numpy(),\n preds['sent_type'], np.array([x for y in\n eval_tags_sequence_labels_ids.numpy() for x in y]), np.array([x for\n y in preds['tags_sequence'] for x in y]), np.array([x for y in\n eval_relations_sequence_labels_ids.numpy() for x in y]), np.\n array([x for y in preds['relations_sequence'] for x in y]),\n label2id, loss_info=eval_loss, logger=logger)\n else:\n result = {}\n for key in eval_loss:\n result[key] = eval_loss[key]\n model.train()\n return preds, result, scores\n\n\ndef main(args):\n if os.path.exists(args.output_dir) and args.do_train:\n from glob import glob\n tsv_files = glob(os.path.join(args.output_dir, '*best*tsv'))\n if tsv_files:\n print('already computed: skipping ...')\n return\n else:\n print(\n f'already existing {args.output_dir}. but without weight file and tsv files ...'\n )\n os.system(f'rm -r {args.output_dir}')\n assert args.context_mode in ['full', 'center', 'left', 'right']\n source_model = os.path.join(args.output_dir,\n f'{args.model_prefix}pytorch_model.bin')\n dest_model = os.path.join(args.output_dir, 'pytorch_model.bin')\n rm_model = False\n if args.do_eval:\n if not os.path.exists(source_model):\n print(f'returning ... 
not found {source_model}')\n return\n if source_model != dest_model:\n rm_model = True\n dest_tmp_model_path = tempfile.mkdtemp()\n os.system(\n f\"cp {source_model} {os.path.join(dest_tmp_model_path, 'pytorch_model.bin')}\"\n )\n os.system(\n f\"cp {os.path.join(args.output_dir, 'config.json')} {os.path.join(dest_tmp_model_path, 'config.json')}\"\n )\n os.system(\n f\"cp {os.path.join(args.output_dir, 'vocab.txt')} {os.path.join(dest_tmp_model_path, 'vocab.txt')}\"\n )\n else:\n dest_tmp_model_path = args.output_dir\n device = torch.device('cuda' if torch.cuda.is_available() and not args.\n no_cuda else 'cpu')\n n_gpu = torch.cuda.device_count()\n if args.gradient_accumulation_steps < 1:\n raise ValueError('gradient_accumulation_steps parameter should be >= 1'\n )\n args.train_batch_size = (args.train_batch_size // args.\n gradient_accumulation_steps)\n if args.do_train:\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n if not args.do_train and not args.do_eval:\n raise ValueError(\n 'At least one of `do_train` or `do_eval` must be True.')\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n elif args.do_train or args.do_validate:\n raise ValueError(args.output_dir, 'output_dir already exists')\n suffix = datetime.now().isoformat().replace('-', '_').replace(':', '_'\n ).split('.')[0].replace('T', '-')\n if args.do_train:\n train_writer = SummaryWriter(log_dir=os.path.join('tensorboard',\n args.output_dir, 'train'))\n dev_writer = SummaryWriter(log_dir=os.path.join('tensorboard', args\n .output_dir, 'dev'))\n logger.addHandler(logging.FileHandler(os.path.join(args.output_dir,\n f'train_{suffix}.log'), 'w'))\n eval_logger.addHandler(logging.FileHandler(os.path.join(args.\n output_dir, f'scores_{suffix}.log'), 'w'))\n else:\n logger.addHandler(logging.FileHandler(os.path.join(args.output_dir,\n f'eval_{suffix}.log'), 'w'))\n logger.info(args)\n logger.info('device: {}, n_gpu: {}'.format(device, n_gpu))\n processor = DataProcessor(filter_task_1=args.filter_task_1,\n filter_task_3=args.filter_task_3)\n eval_metrics = {eval_metric: (True) for eval_metric in args.\n eval_metrics.split('+')}\n if args.filter_task_1 and args.do_train:\n assert args.sent_type_clf_weight == 0.0\n eval_metrics.pop('sent_type_1_f1-score')\n if args.filter_task_3 and args.do_train:\n assert args.relations_sequence_clf_weight == 0.0\n eval_metrics.pop('relations_sequence_macro-avg_f1-score')\n if (args.sent_type_clf_weight == 0.0 and 'sent_type_1_f1-score' in\n eval_metrics):\n eval_metrics.pop('sent_type_1_f1-score')\n if (args.tags_sequence_clf_weight == 0.0 and \n 'tags_sequence_macro-avg_f1-score' in eval_metrics):\n eval_metrics.pop('tags_sequence_macro-avg_f1-score')\n if (args.relations_sequence_clf_weight == 0.0 and \n 'relations_sequence_macro-avg_f1-score' in eval_metrics):\n eval_metrics.pop('relations_sequence_macro-avg_f1-score')\n assert len(eval_metrics) > 0, 'inconsistent train params'\n if args.context_mode != 'full':\n keys = list(eval_metrics.keys())\n for key in keys:\n if key != 'sent_type_1_f1-score':\n eval_metrics.pop(key)\n assert 'sent_type_1_f1-score' in eval_metrics\n sent_type_labels_list = processor.get_sent_type_labels(args.data_dir,\n logger)\n tags_sequence_labels_list = processor.get_sequence_labels(args.data_dir,\n logger=logger, sequence_type='tags_sequence')\n relations_sequence_labels_list = processor.get_sequence_labels(args.\n data_dir, logger=logger, 
sequence_type='relations_sequence')\n label2id = {'sent_type': {label: i for i, label in enumerate(\n sent_type_labels_list)}, 'tags_sequence': {label: i for i, label in\n enumerate(tags_sequence_labels_list, 1)}, 'relations_sequence': {\n label: i for i, label in enumerate(relations_sequence_labels_list, 1)}}\n id2label = {'sent_type': {i: label for i, label in enumerate(\n sent_type_labels_list)}, 'tags_sequence': {i: label for i, label in\n enumerate(tags_sequence_labels_list, 1)}, 'relations_sequence': {i:\n label for i, label in enumerate(relations_sequence_labels_list, 1)}}\n num_sent_type_labels = len(sent_type_labels_list)\n num_tags_sequence_labels = len(tags_sequence_labels_list) + 1\n num_relations_sequence_labels = len(relations_sequence_labels_list) + 1\n do_lower_case = 'uncased' in args.model\n tokenizer = tokenizers[args.model].from_pretrained(args.model,\n do_lower_case=do_lower_case)\n model_name = args.model\n if args.do_train:\n config = configs[args.model]\n config = config.from_pretrained(args.model, hidden_dropout_prob=\n args.dropout)\n model = models[model_name].from_pretrained(args.model, cache_dir=\n str(PYTORCH_PRETRAINED_BERT_CACHE), num_sent_type_labels=\n num_sent_type_labels, num_tags_sequence_labels=\n num_tags_sequence_labels, num_relations_sequence_labels=\n num_relations_sequence_labels, sent_type_clf_weight=args.\n sent_type_clf_weight, tags_sequence_clf_weight=args.\n tags_sequence_clf_weight, relations_sequence_clf_weight=args.\n relations_sequence_clf_weight, pooling_type=args.\n subtokens_pooling_type, config=config)\n print('task weights:', model.sent_type_clf_weight, model.\n tags_sequence_clf_weight, model.relations_sequence_clf_weight)\n else:\n model = models[model_name].from_pretrained(dest_tmp_model_path,\n num_sent_type_labels=num_sent_type_labels,\n num_tags_sequence_labels=num_tags_sequence_labels,\n num_relations_sequence_labels=num_relations_sequence_labels,\n sent_type_clf_weight=args.sent_type_clf_weight,\n tags_sequence_clf_weight=args.tags_sequence_clf_weight,\n relations_sequence_clf_weight=args.\n relations_sequence_clf_weight, pooling_type=args.\n subtokens_pooling_type)\n model.to(device)\n eval_examples = processor.get_dev_examples(args.data_dir)\n eval_features, eval_new_examples = model.convert_examples_to_features(\n eval_examples, label2id, args.max_seq_length, tokenizer, logger,\n args.sequence_mode, context_mode=args.context_mode)\n logger.info('***** Dev *****')\n logger.info(' Num examples = %d', len(eval_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids, eval_relations_sequence_labels_ids\n ) = get_dataloader_and_tensors(eval_features, args.eval_batch_size)\n if not args.do_eval:\n test_file = os.path.join(args.data_dir, 'test.json'\n ) if args.test_file == '' else args.test_file\n test_examples = processor.get_test_examples(test_file)\n test_features, test_new_examples = model.convert_examples_to_features(\n test_examples, label2id, args.max_seq_length, tokenizer, logger,\n args.sequence_mode, context_mode=args.context_mode)\n logger.info('***** Test *****')\n logger.info(' Num examples = %d', len(test_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (test_dataloader, test_sent_type_labels_ids,\n test_tags_sequence_labels_ids, test_relations_sequence_labels_ids\n ) = (get_dataloader_and_tensors(test_features, args.\n eval_batch_size))\n if args.do_train:\n train_examples = 
processor.get_train_examples(args.data_dir)\n train_features, _ = model.convert_examples_to_features(train_examples,\n label2id, args.max_seq_length, tokenizer, logger, args.\n sequence_mode, context_mode=args.context_mode)\n if args.train_mode == 'sorted' or args.train_mode == 'random_sorted':\n train_features = sorted(train_features, key=lambda f: np.sum(f.\n input_mask))\n else:\n random.shuffle(train_features)\n (train_dataloader, sent_type_ids, tags_sequence_ids,\n relations_sequence_ids) = (get_dataloader_and_tensors(\n train_features, args.train_batch_size))\n train_batches = [batch for batch in train_dataloader]\n num_train_optimization_steps = len(train_dataloader\n ) // args.gradient_accumulation_steps * args.num_train_epochs\n warmup_steps = int(args.warmup_proportion *\n num_train_optimization_steps)\n logger.info('***** Training *****')\n logger.info(' Num examples = %d', len(train_examples))\n logger.info(' Batch size = %d', args.train_batch_size)\n logger.info(' Num steps = %d', num_train_optimization_steps)\n best_result = defaultdict(float)\n for eval_metric in eval_metrics:\n best_result[eval_metric] = args.threshold\n if eval_metric.startswith('sent_type'):\n best_result[eval_metric] += 0.2\n print('best results thresholds:')\n print(best_result)\n eval_step = max(1, len(train_batches) // args.eval_per_epoch)\n lr = float(args.learning_rate)\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [{'params': [param for name, param in\n param_optimizer if not any(nd in name for nd in no_decay)],\n 'weight_decay': float(args.weight_decay)}, {'params': [param for\n name, param in param_optimizer if any(nd in name for nd in\n no_decay)], 'weight_decay': 0.0}]\n optimizer = AdamW(optimizer_grouped_parameters, lr=lr)\n if args.lr_schedule == 'constant_warmup':\n print('lr schedule = constant_warmup')\n scheduler = get_constant_schedule_with_warmup(optimizer,\n num_warmup_steps=warmup_steps)\n else:\n scheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=warmup_steps, num_training_steps=\n num_train_optimization_steps)\n start_time = time.time()\n global_step = 0\n for epoch in range(1, 1 + int(args.num_train_epochs)):\n tr_loss = 0\n nb_tr_examples = 0\n nb_tr_steps = 0\n cur_train_loss = defaultdict(float)\n model.train()\n logger.info('Start epoch #{} (lr = {})...'.format(epoch, lr))\n if (args.train_mode == 'random' or args.train_mode ==\n 'random_sorted'):\n random.shuffle(train_batches)\n for step, batch in enumerate(tqdm(train_batches, total=len(\n train_batches), desc='training ... 
')):\n batch = tuple(t.to(device) for t in batch)\n (input_ids, input_mask, segment_ids, sent_type_labels_ids,\n tags_sequence_labels_ids, relations_sequence_labels_ids,\n token_valid_pos_ids) = batch\n train_loss = model(input_ids=input_ids, token_type_ids=\n segment_ids, attention_mask=input_mask,\n sent_type_labels=sent_type_labels_ids,\n tags_sequence_labels=tags_sequence_labels_ids,\n relations_sequence_labels=relations_sequence_labels_ids,\n token_valid_pos_ids=token_valid_pos_ids, return_outputs\n =False, device=device)\n for key in train_loss:\n cur_train_loss[key] += train_loss[key].mean().item()\n loss_to_optimize = train_loss['weighted_loss']\n if n_gpu > 1:\n loss_to_optimize = loss_to_optimize.mean()\n if args.gradient_accumulation_steps > 1:\n loss_to_optimize = (loss_to_optimize / args.\n gradient_accumulation_steps)\n loss_to_optimize.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.\n max_grad_norm)\n tr_loss += loss_to_optimize.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n global_step += 1\n if args.do_validate and (step + 1) % eval_step == 0:\n logger.info(\n 'Ep: {}, Stp: {}/{}, usd_t={:.2f}s, loss={:.6f}'.\n format(epoch, step + 1, len(train_batches), time.\n time() - start_time, tr_loss / nb_tr_steps))\n predict_for_metrics = []\n cur_train_mean_loss = {}\n for key in cur_train_loss:\n cur_train_mean_loss[f'train_{key}'] = cur_train_loss[\n key] / nb_tr_steps\n preds, result, scores = evaluate(model, device,\n eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids,\n eval_relations_sequence_labels_ids, label2id,\n cur_train_mean_loss=cur_train_mean_loss, logger=\n eval_logger)\n result['global_step'] = global_step\n result['epoch'] = epoch\n result['learning_rate'] = lr\n result['batch_size'] = (args.train_batch_size * args.\n gradient_accumulation_steps)\n for key, value in result.items():\n dev_writer.add_scalar(key, value, global_step)\n for key, value in cur_train_mean_loss.items():\n train_writer.add_scalar(f'running_train_{key}',\n value, global_step)\n logger.info('First 20 predictions:')\n for sent_type_pred, sent_type_label in zip(preds[\n 'sent_type'][:20], eval_sent_type_labels_ids.numpy(\n )[:20]):\n sign = (u'✓' if sent_type_pred == sent_type_label else\n u'✘')\n logger.info('pred = %s, label = %s %s' % (id2label[\n 'sent_type'][sent_type_pred], id2label[\n 'sent_type'][sent_type_label], sign))\n for eval_metric in eval_metrics:\n if result[eval_metric] > best_result[eval_metric]:\n best_result[eval_metric] = result[eval_metric]\n logger.info(\n '!!! Best dev %s (lr=%s, epoch=%d): %.2f' %\n (eval_metric, str(lr), epoch, result[\n eval_metric] * 100.0))\n predict_for_metrics.append(eval_metric)\n for metric_id, eval_metric in tqdm(enumerate(\n predict_for_metrics), total=len(predict_for_metrics\n ), desc='writing predictions ... 
'):\n dest_file = f'dev_best_{eval_metric}'\n write_predictions(args, eval_new_examples,\n eval_features, preds, scores, dest_file,\n label2id=label2id, id2label=id2label, metrics=\n result)\n if metric_id == 0:\n test_preds, test_result, test_scores = evaluate(\n model, device, test_dataloader,\n test_sent_type_labels_ids,\n test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids,\n label2id, cur_train_mean_loss=None, logger=None\n )\n output_model_file = os.path.join(args.\n output_dir,\n f'best_{eval_metric}_{WEIGHTS_NAME}')\n save_model(args, model, tokenizer,\n output_model_file)\n for metric in predict_for_metrics[1:]:\n dest_model_path = os.path.join(args.\n output_dir, f'best_{metric}_{WEIGHTS_NAME}'\n )\n os.system(\n f'cp {output_model_file} {dest_model_path}'\n )\n dest_file = f'test_best_{eval_metric}'\n write_predictions(args, test_new_examples,\n test_features, test_preds, test_scores,\n dest_file, label2id=label2id, id2label=id2label,\n metrics=test_result)\n if args.log_train_metrics:\n preds, result, scores = evaluate(model, device,\n train_dataloader, sent_type_ids, tags_sequence_ids,\n relations_sequence_ids, label2id, logger=logger,\n skip_every_n_examples=args.skip_every_n_examples)\n result['global_step'] = global_step\n result['epoch'] = epoch\n result['learning_rate'] = lr\n result['batch_size'\n ] = args.train_batch_size * args.gradient_accumulation_steps\n for key, value in result.items():\n train_writer.add_scalar(key, value, global_step)\n if args.do_eval:\n test_files = os.path.join(args.data_dir, 'test.json'\n ) if args.test_file == '' else args.test_file\n for test_file in test_files.split('8'):\n test_examples = processor.get_test_examples(test_file)\n test_features, test_new_examples = (model.\n convert_examples_to_features(test_examples, label2id, args.\n max_seq_length, tokenizer, logger, args.sequence_mode,\n context_mode=args.context_mode))\n logger.info('***** Test *****')\n logger.info(' Num examples = %d', len(test_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (test_dataloader, test_sent_type_labels_ids,\n test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids) = (\n get_dataloader_and_tensors(test_features, args.eval_batch_size)\n )\n preds, result, scores = evaluate(model, device, test_dataloader,\n test_sent_type_labels_ids, test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids, label2id,\n compute_metrics=False)\n dest_file = args.model_prefix + test_file.split('/')[-1].replace(\n '.json', '')\n write_predictions(args, test_new_examples, test_features, preds,\n scores, dest_file, label2id=label2id, id2label=id2label,\n metrics=result)\n if rm_model:\n shutil.rmtree(dest_tmp_model_path)\n\n\ndef save_model(args, model, tokenizer, output_model_file):\n start = time.time()\n model_to_save = model.module if hasattr(model, 'module') else model\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n print(\n f'model saved in {time.time() - start} seconds to {output_model_file}')\n\n\ndef write_predictions(args, examples, features, preds, scores, dest_file,\n label2id, id2label, metrics=None):\n aggregated_results = {}\n orig_positions_map = [ex.orig_positions_map for ex in features]\n neg_label_mapper = {'tags_sequence': 'O', 'relations_sequence': '0'}\n for task in ['tags_sequence', 'relations_sequence']:\n 
aggregated_results[task] = [(list(pred[orig_positions]) + [label2id\n [task][neg_label_mapper[task]]] * (len(ex.tokens) - len(\n orig_positions))) for pred, orig_positions, ex in zip(preds[\n task], orig_positions_map, examples)]\n aggregated_results[f'{task}_scores'] = [(list(score[orig_positions]\n ) + [0.999] * (len(ex.tokens) - len(orig_positions))) for score,\n orig_positions, ex in zip(scores[task], orig_positions_map,\n examples)]\n prediction_results = {'idx': [ex.guid for ex in examples], 'tokens': [\n ' '.join(ex.tokens) for ex in examples], 'sent_type_label': [ex.\n sent_type for ex in examples], 'sent_type_pred': [id2label[\n 'sent_type'][x] for x in preds['sent_type']], 'sent_type_scores': [\n str(score) for score in scores['sent_type']], 'sent_start': [ex.\n sent_start for ex in examples], 'sent_end': [ex.sent_end for ex in\n examples], 'tags_sequence_labels': [' '.join(ex.tags_sequence) for\n ex in examples], 'tags_sequence_pred': [' '.join([(id2label[\n 'tags_sequence'][x] if x != 0 else 'O') for x in sent]) for sent in\n aggregated_results['tags_sequence']], 'tags_sequence_scores': [' '.\n join([str(score) for score in sent]) for sent in aggregated_results\n ['tags_sequence_scores']], 'tags_ids': [' '.join(ex.tags_ids) for\n ex in examples], 'relations_sequence_labels': [' '.join(ex.\n relations_sequence) for ex in examples], 'relations_sequence_pred':\n [' '.join([(id2label['relations_sequence'][x] if x != 0 else '0') for\n x in sent]) for sent in aggregated_results['relations_sequence']],\n 'relations_sequence_scores': [' '.join([str(score) for score in\n sent]) for sent in aggregated_results['relations_sequence_scores']],\n 'subj_start': [ex.subj_start for ex in examples], 'subj_end': [ex.\n subj_end for ex in examples], 'infile_offsets': [' '.join([str(\n offset) for offset in ex.infile_offsets]) for ex in examples],\n 'start_char': [' '.join(ex.start_char) for ex in examples],\n 'end_char': [' '.join(ex.end_char) for ex in examples], 'source': [\n ex.source for ex in examples]}\n prediction_results = pd.DataFrame(prediction_results)\n prediction_results.to_csv(os.path.join(args.output_dir,\n f'{dest_file}.tsv'), sep='\\t', index=False)\n if metrics is not None:\n with open(os.path.join(args.output_dir,\n f'{dest_file}_eval_results.txt'), 'w') as f:\n for key in sorted(metrics.keys()):\n f.write('%s = %s\\n' % (key, str(metrics[key])))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--test_file', default='', type=str, required=False)\n parser.add_argument('--model', default='bert-large-uncased', type=str,\n required=True)\n parser.add_argument('--data_dir', default='data', type=str, required=\n True, help=\n 'The input data dir. 
Should contain the .json files for the task.')\n parser.add_argument('--output_dir', default=None, type=str, required=\n True, help=\n 'The output directory where the model predictions and checkpoints will be written.'\n )\n parser.add_argument('--eval_per_epoch', default=4, type=int, help=\n 'How many times to do validation on dev set per epoch')\n parser.add_argument('--max_seq_length', default=256, type=int, help=\n \"\"\"The maximum total input sequence length after WordPiece tokenization.\nSequences longer than this will be truncated, and sequences shorter\nthan this will be padded.\"\"\"\n )\n parser.add_argument('--do_train', action='store_true', help=\n 'Whether to run training.')\n parser.add_argument('--train_mode', type=str, default='random_sorted',\n choices=['random', 'sorted', 'random_sorted'])\n parser.add_argument('--do_validate', action='store_true', help=\n 'Whether to run validation on dev set.')\n parser.add_argument('--do_eval', action='store_true', help=\n 'Whether to run eval on the test set.')\n parser.add_argument('--train_batch_size', default=32, type=int, help=\n 'Total batch size for training.')\n parser.add_argument('--eval_batch_size', default=8, type=int, help=\n 'Total batch size for eval.')\n parser.add_argument('--eval_metrics', default='+'.join([\n 'sent_type_1_f1-score', 'tags_sequence_macro-avg_f1-score',\n 'relations_sequence_macro-avg_f1-score']), type=str)\n parser.add_argument('--learning_rate', default=1e-05, type=float, help=\n 'The initial learning rate for Adam.')\n parser.add_argument('--num_train_epochs', default=6.0, type=float, help\n ='Total number of training epochs to perform.')\n parser.add_argument('--warmup_proportion', default=0.1, type=float,\n help=\n \"\"\"Proportion of training to perform linear learning rate warmup.\nE.g., 0.1 = 10%% of training.\"\"\"\n )\n parser.add_argument('--max_grad_norm', default=1.0, type=float, help=\n 'maximal gradient norm')\n parser.add_argument('--sent_type_clf_weight', default=1.0, type=float,\n help='the weight of task 1')\n parser.add_argument('--tags_sequence_clf_weight', default=1.0, type=\n float, help='The weight of task 2')\n parser.add_argument('--relations_sequence_clf_weight', default=1.0,\n type=float, help='The weight of task 3')\n parser.add_argument('--weight_decay', default=0.1, type=float, help=\n 'weight_decay coefficient for regularization')\n parser.add_argument('--dropout', default=0.1, type=float, help=\n 'dropout rate')\n parser.add_argument('--no_cuda', action='store_true', help=\n 'Whether not to use CUDA when available')\n parser.add_argument('--seed', type=int, default=42, help=\n 'random seed for initialization')\n parser.add_argument('--gradient_accumulation_steps', type=int, default=\n 8, help=\n 'Number of updates steps to accumulate before performing a backward/update pass.'\n )\n parser.add_argument('--filter_task_3', action='store_true', help=\n 'exclude task 3 from training')\n parser.add_argument('--filter_task_1', action='store_true', help=\n 'exclude task 1 from training')\n parser.add_argument('--subtokens_pooling_type', type=str, default=\n 'first', help='pooling mode in bert-ner, one of avg or first')\n parser.add_argument('--sequence_mode', type=str, default='not-all',\n help='train to predict for all subtokens or notall or not-all')\n parser.add_argument('--context_mode', type=str, default='full', help=\n 'context for task 1: one from center, full, left, right')\n parser.add_argument('--lr_schedule', type=str, default='linear_warmup',\n help='lr adjustment 
schedule')\n    parser.add_argument('--log_train_metrics', action='store_true', help=\n        'compute metrics for train set too')\n    parser.add_argument('--threshold', type=float, default=0.3, help=\n        'threshold for best models to save')\n    parser.add_argument('--output_dirs_to_exclude', type=str, default='',\n        help='path to json file containing list of output' +\n        ' dirs to exclude from training')\n    parser.add_argument('--skip_every_n_examples', type=int, default=30,\n        help='number of examples in the train set to skip when evaluating metrics')\n    parser.add_argument('--model_prefix', type=str, default=\n        'best_sent_type_1_f1-score_', help='prefix of the model weights')\n    parsed_args = parser.parse_args()\n    main(parsed_args)\n",
"<import token>\nlogging.basicConfig(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n<assignment token>\n\n\ndef compute_all_metrics(sent_type_labels, sent_type_preds,\n tags_sequence_labels, tags_sequence_preds, relations_sequence_labels,\n relations_sequence_preds, label2id, loss_info=None, logger=None):\n eval_tags_sequence_labels = [label2id['tags_sequence'][lab] for lab in\n EVAL_TAGS]\n eval_relations_sequence_labels = [label2id['relations_sequence'][lab] for\n lab in EVAL_RELATIONS]\n task_1_report = classification_report(sent_type_labels, sent_type_preds,\n labels=[0, 1], output_dict=True)\n task_2_report = classification_report(tags_sequence_labels,\n tags_sequence_preds, labels=eval_tags_sequence_labels, output_dict=True\n )\n task_3_report = classification_report(relations_sequence_labels,\n relations_sequence_preds, labels=eval_relations_sequence_labels,\n output_dict=True)\n result = {}\n for x in ['0', '1', 'weighted avg', 'macro avg']:\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f\"sent_type_{x.replace(' ', '-')}_{metrics}\"] = round(\n task_1_report[x][metrics], 6)\n id2label = {val: key for key, val in label2id['tags_sequence'].items()}\n id2label['weighted avg'] = 'weighted-avg'\n id2label['macro avg'] = 'macro-avg'\n for x in (eval_tags_sequence_labels + ['weighted avg', 'macro avg']):\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f'tags_sequence_{id2label[x]}_{metrics}'] = round(\n task_2_report[str(x)][metrics], 6)\n id2label = {val: key for key, val in label2id['relations_sequence'].items()\n }\n id2label['weighted avg'] = 'weighted-avg'\n id2label['macro avg'] = 'macro-avg'\n for x in (eval_relations_sequence_labels + ['weighted avg', 'macro avg']):\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f'relations_sequence_{id2label[x]}_{metrics}'] = round(\n task_3_report[str(x)][metrics], 6)\n if logger is not None:\n logger.info('=====================================')\n for key in sorted(result.keys()):\n logger.info(' %s = %s', key, str(result[key]))\n if loss_info is not None:\n for key in sorted(loss_info.keys()):\n logger.info(' %s = %s', key, str(loss_info[key]))\n return result\n\n\ndef evaluate(model, device, eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids, eval_relations_sequence_labels_ids,\n label2id, compute_metrics=True, verbose=False, cur_train_mean_loss=None,\n logger=None, skip_every_n_examples=1):\n model.eval()\n num_sent_type_labels = model.num_sent_type_labels\n num_tags_sequence_labels = model.num_tags_sequence_labels\n num_relations_sequence_labels = model.num_relations_sequence_labels\n sent_type_clf_weight = model.sent_type_clf_weight\n tags_sequence_clf_weight = model.tags_sequence_clf_weight\n relations_sequence_clf_weight = model.relations_sequence_clf_weight\n eval_loss = defaultdict(float)\n nb_eval_steps = 0\n preds = defaultdict(list)\n for batch_id, batch in enumerate(tqdm(eval_dataloader, total=len(\n eval_dataloader), desc='validation ... 
')):\n if skip_every_n_examples != 1 and (batch_id + 1\n ) % skip_every_n_examples != 1:\n continue\n batch = tuple([elem.to(device) for elem in batch])\n (input_ids, input_mask, segment_ids, sent_type_labels_ids,\n tags_sequence_labels_ids, relations_sequence_labels_ids,\n token_valid_pos_ids) = batch\n with torch.no_grad():\n outputs, loss = model(input_ids=input_ids, token_type_ids=\n segment_ids, attention_mask=input_mask, sent_type_labels=\n sent_type_labels_ids, tags_sequence_labels=\n tags_sequence_labels_ids, relations_sequence_labels=\n relations_sequence_labels_ids, token_valid_pos_ids=\n token_valid_pos_ids, device=device)\n (sent_type_logits, tags_sequence_logits, relations_sequence_logits\n ) = outputs[:3]\n if compute_metrics:\n eval_loss['sent_type_loss'] += loss['sent_type_loss'].mean().item()\n eval_loss['tags_sequence_loss'] += loss['tags_sequence_loss'].mean(\n ).item()\n eval_loss['relations_sequence_loss'] += loss[\n 'relations_sequence_loss'].mean().item()\n eval_loss['weighted_loss'] += loss['weighted_loss'].mean().item()\n nb_eval_steps += 1\n preds['sent_type'].append(sent_type_logits.detach().cpu().numpy())\n preds['tags_sequence'].append(tags_sequence_logits.detach().cpu().\n numpy())\n preds['relations_sequence'].append(relations_sequence_logits.detach\n ().cpu().numpy())\n preds['sent_type'] = np.concatenate(preds['sent_type'], axis=0)\n preds['tags_sequence'] = np.concatenate(preds['tags_sequence'], axis=0)\n preds['relations_sequence'] = np.concatenate(preds['relations_sequence'\n ], axis=0)\n scores = {}\n for key in preds:\n scores[key] = softmax(preds[key], axis=-1).max(axis=-1)\n preds[key] = preds[key].argmax(axis=-1)\n if compute_metrics:\n for key in eval_loss:\n eval_loss[key] = eval_loss[key] / nb_eval_steps\n if cur_train_mean_loss is not None:\n eval_loss.update(cur_train_mean_loss)\n result = compute_all_metrics(eval_sent_type_labels_ids.numpy(),\n preds['sent_type'], np.array([x for y in\n eval_tags_sequence_labels_ids.numpy() for x in y]), np.array([x for\n y in preds['tags_sequence'] for x in y]), np.array([x for y in\n eval_relations_sequence_labels_ids.numpy() for x in y]), np.\n array([x for y in preds['relations_sequence'] for x in y]),\n label2id, loss_info=eval_loss, logger=logger)\n else:\n result = {}\n for key in eval_loss:\n result[key] = eval_loss[key]\n model.train()\n return preds, result, scores\n\n\ndef main(args):\n if os.path.exists(args.output_dir) and args.do_train:\n from glob import glob\n tsv_files = glob(os.path.join(args.output_dir, '*best*tsv'))\n if tsv_files:\n print('already computed: skipping ...')\n return\n else:\n print(\n f'already existing {args.output_dir}. but without weight file and tsv files ...'\n )\n os.system(f'rm -r {args.output_dir}')\n assert args.context_mode in ['full', 'center', 'left', 'right']\n source_model = os.path.join(args.output_dir,\n f'{args.model_prefix}pytorch_model.bin')\n dest_model = os.path.join(args.output_dir, 'pytorch_model.bin')\n rm_model = False\n if args.do_eval:\n if not os.path.exists(source_model):\n print(f'returning ... 
not found {source_model}')\n return\n if source_model != dest_model:\n rm_model = True\n dest_tmp_model_path = tempfile.mkdtemp()\n os.system(\n f\"cp {source_model} {os.path.join(dest_tmp_model_path, 'pytorch_model.bin')}\"\n )\n os.system(\n f\"cp {os.path.join(args.output_dir, 'config.json')} {os.path.join(dest_tmp_model_path, 'config.json')}\"\n )\n os.system(\n f\"cp {os.path.join(args.output_dir, 'vocab.txt')} {os.path.join(dest_tmp_model_path, 'vocab.txt')}\"\n )\n else:\n dest_tmp_model_path = args.output_dir\n device = torch.device('cuda' if torch.cuda.is_available() and not args.\n no_cuda else 'cpu')\n n_gpu = torch.cuda.device_count()\n if args.gradient_accumulation_steps < 1:\n raise ValueError('gradient_accumulation_steps parameter should be >= 1'\n )\n args.train_batch_size = (args.train_batch_size // args.\n gradient_accumulation_steps)\n if args.do_train:\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n if not args.do_train and not args.do_eval:\n raise ValueError(\n 'At least one of `do_train` or `do_eval` must be True.')\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n elif args.do_train or args.do_validate:\n raise ValueError(args.output_dir, 'output_dir already exists')\n suffix = datetime.now().isoformat().replace('-', '_').replace(':', '_'\n ).split('.')[0].replace('T', '-')\n if args.do_train:\n train_writer = SummaryWriter(log_dir=os.path.join('tensorboard',\n args.output_dir, 'train'))\n dev_writer = SummaryWriter(log_dir=os.path.join('tensorboard', args\n .output_dir, 'dev'))\n logger.addHandler(logging.FileHandler(os.path.join(args.output_dir,\n f'train_{suffix}.log'), 'w'))\n eval_logger.addHandler(logging.FileHandler(os.path.join(args.\n output_dir, f'scores_{suffix}.log'), 'w'))\n else:\n logger.addHandler(logging.FileHandler(os.path.join(args.output_dir,\n f'eval_{suffix}.log'), 'w'))\n logger.info(args)\n logger.info('device: {}, n_gpu: {}'.format(device, n_gpu))\n processor = DataProcessor(filter_task_1=args.filter_task_1,\n filter_task_3=args.filter_task_3)\n eval_metrics = {eval_metric: (True) for eval_metric in args.\n eval_metrics.split('+')}\n if args.filter_task_1 and args.do_train:\n assert args.sent_type_clf_weight == 0.0\n eval_metrics.pop('sent_type_1_f1-score')\n if args.filter_task_3 and args.do_train:\n assert args.relations_sequence_clf_weight == 0.0\n eval_metrics.pop('relations_sequence_macro-avg_f1-score')\n if (args.sent_type_clf_weight == 0.0 and 'sent_type_1_f1-score' in\n eval_metrics):\n eval_metrics.pop('sent_type_1_f1-score')\n if (args.tags_sequence_clf_weight == 0.0 and \n 'tags_sequence_macro-avg_f1-score' in eval_metrics):\n eval_metrics.pop('tags_sequence_macro-avg_f1-score')\n if (args.relations_sequence_clf_weight == 0.0 and \n 'relations_sequence_macro-avg_f1-score' in eval_metrics):\n eval_metrics.pop('relations_sequence_macro-avg_f1-score')\n assert len(eval_metrics) > 0, 'inconsistent train params'\n if args.context_mode != 'full':\n keys = list(eval_metrics.keys())\n for key in keys:\n if key != 'sent_type_1_f1-score':\n eval_metrics.pop(key)\n assert 'sent_type_1_f1-score' in eval_metrics\n sent_type_labels_list = processor.get_sent_type_labels(args.data_dir,\n logger)\n tags_sequence_labels_list = processor.get_sequence_labels(args.data_dir,\n logger=logger, sequence_type='tags_sequence')\n relations_sequence_labels_list = processor.get_sequence_labels(args.\n data_dir, logger=logger, 
sequence_type='relations_sequence')\n label2id = {'sent_type': {label: i for i, label in enumerate(\n sent_type_labels_list)}, 'tags_sequence': {label: i for i, label in\n enumerate(tags_sequence_labels_list, 1)}, 'relations_sequence': {\n label: i for i, label in enumerate(relations_sequence_labels_list, 1)}}\n id2label = {'sent_type': {i: label for i, label in enumerate(\n sent_type_labels_list)}, 'tags_sequence': {i: label for i, label in\n enumerate(tags_sequence_labels_list, 1)}, 'relations_sequence': {i:\n label for i, label in enumerate(relations_sequence_labels_list, 1)}}\n num_sent_type_labels = len(sent_type_labels_list)\n num_tags_sequence_labels = len(tags_sequence_labels_list) + 1\n num_relations_sequence_labels = len(relations_sequence_labels_list) + 1\n do_lower_case = 'uncased' in args.model\n tokenizer = tokenizers[args.model].from_pretrained(args.model,\n do_lower_case=do_lower_case)\n model_name = args.model\n if args.do_train:\n config = configs[args.model]\n config = config.from_pretrained(args.model, hidden_dropout_prob=\n args.dropout)\n model = models[model_name].from_pretrained(args.model, cache_dir=\n str(PYTORCH_PRETRAINED_BERT_CACHE), num_sent_type_labels=\n num_sent_type_labels, num_tags_sequence_labels=\n num_tags_sequence_labels, num_relations_sequence_labels=\n num_relations_sequence_labels, sent_type_clf_weight=args.\n sent_type_clf_weight, tags_sequence_clf_weight=args.\n tags_sequence_clf_weight, relations_sequence_clf_weight=args.\n relations_sequence_clf_weight, pooling_type=args.\n subtokens_pooling_type, config=config)\n print('task weights:', model.sent_type_clf_weight, model.\n tags_sequence_clf_weight, model.relations_sequence_clf_weight)\n else:\n model = models[model_name].from_pretrained(dest_tmp_model_path,\n num_sent_type_labels=num_sent_type_labels,\n num_tags_sequence_labels=num_tags_sequence_labels,\n num_relations_sequence_labels=num_relations_sequence_labels,\n sent_type_clf_weight=args.sent_type_clf_weight,\n tags_sequence_clf_weight=args.tags_sequence_clf_weight,\n relations_sequence_clf_weight=args.\n relations_sequence_clf_weight, pooling_type=args.\n subtokens_pooling_type)\n model.to(device)\n eval_examples = processor.get_dev_examples(args.data_dir)\n eval_features, eval_new_examples = model.convert_examples_to_features(\n eval_examples, label2id, args.max_seq_length, tokenizer, logger,\n args.sequence_mode, context_mode=args.context_mode)\n logger.info('***** Dev *****')\n logger.info(' Num examples = %d', len(eval_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids, eval_relations_sequence_labels_ids\n ) = get_dataloader_and_tensors(eval_features, args.eval_batch_size)\n if not args.do_eval:\n test_file = os.path.join(args.data_dir, 'test.json'\n ) if args.test_file == '' else args.test_file\n test_examples = processor.get_test_examples(test_file)\n test_features, test_new_examples = model.convert_examples_to_features(\n test_examples, label2id, args.max_seq_length, tokenizer, logger,\n args.sequence_mode, context_mode=args.context_mode)\n logger.info('***** Test *****')\n logger.info(' Num examples = %d', len(test_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (test_dataloader, test_sent_type_labels_ids,\n test_tags_sequence_labels_ids, test_relations_sequence_labels_ids\n ) = (get_dataloader_and_tensors(test_features, args.\n eval_batch_size))\n if args.do_train:\n train_examples = 
processor.get_train_examples(args.data_dir)\n train_features, _ = model.convert_examples_to_features(train_examples,\n label2id, args.max_seq_length, tokenizer, logger, args.\n sequence_mode, context_mode=args.context_mode)\n if args.train_mode == 'sorted' or args.train_mode == 'random_sorted':\n train_features = sorted(train_features, key=lambda f: np.sum(f.\n input_mask))\n else:\n random.shuffle(train_features)\n (train_dataloader, sent_type_ids, tags_sequence_ids,\n relations_sequence_ids) = (get_dataloader_and_tensors(\n train_features, args.train_batch_size))\n train_batches = [batch for batch in train_dataloader]\n num_train_optimization_steps = len(train_dataloader\n ) // args.gradient_accumulation_steps * args.num_train_epochs\n warmup_steps = int(args.warmup_proportion *\n num_train_optimization_steps)\n logger.info('***** Training *****')\n logger.info(' Num examples = %d', len(train_examples))\n logger.info(' Batch size = %d', args.train_batch_size)\n logger.info(' Num steps = %d', num_train_optimization_steps)\n best_result = defaultdict(float)\n for eval_metric in eval_metrics:\n best_result[eval_metric] = args.threshold\n if eval_metric.startswith('sent_type'):\n best_result[eval_metric] += 0.2\n print('best results thresholds:')\n print(best_result)\n eval_step = max(1, len(train_batches) // args.eval_per_epoch)\n lr = float(args.learning_rate)\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [{'params': [param for name, param in\n param_optimizer if not any(nd in name for nd in no_decay)],\n 'weight_decay': float(args.weight_decay)}, {'params': [param for\n name, param in param_optimizer if any(nd in name for nd in\n no_decay)], 'weight_decay': 0.0}]\n optimizer = AdamW(optimizer_grouped_parameters, lr=lr)\n if args.lr_schedule == 'constant_warmup':\n print('lr schedule = constant_warmup')\n scheduler = get_constant_schedule_with_warmup(optimizer,\n num_warmup_steps=warmup_steps)\n else:\n scheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=warmup_steps, num_training_steps=\n num_train_optimization_steps)\n start_time = time.time()\n global_step = 0\n for epoch in range(1, 1 + int(args.num_train_epochs)):\n tr_loss = 0\n nb_tr_examples = 0\n nb_tr_steps = 0\n cur_train_loss = defaultdict(float)\n model.train()\n logger.info('Start epoch #{} (lr = {})...'.format(epoch, lr))\n if (args.train_mode == 'random' or args.train_mode ==\n 'random_sorted'):\n random.shuffle(train_batches)\n for step, batch in enumerate(tqdm(train_batches, total=len(\n train_batches), desc='training ... 
')):\n batch = tuple(t.to(device) for t in batch)\n (input_ids, input_mask, segment_ids, sent_type_labels_ids,\n tags_sequence_labels_ids, relations_sequence_labels_ids,\n token_valid_pos_ids) = batch\n train_loss = model(input_ids=input_ids, token_type_ids=\n segment_ids, attention_mask=input_mask,\n sent_type_labels=sent_type_labels_ids,\n tags_sequence_labels=tags_sequence_labels_ids,\n relations_sequence_labels=relations_sequence_labels_ids,\n token_valid_pos_ids=token_valid_pos_ids, return_outputs\n =False, device=device)\n for key in train_loss:\n cur_train_loss[key] += train_loss[key].mean().item()\n loss_to_optimize = train_loss['weighted_loss']\n if n_gpu > 1:\n loss_to_optimize = loss_to_optimize.mean()\n if args.gradient_accumulation_steps > 1:\n loss_to_optimize = (loss_to_optimize / args.\n gradient_accumulation_steps)\n loss_to_optimize.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.\n max_grad_norm)\n tr_loss += loss_to_optimize.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n global_step += 1\n if args.do_validate and (step + 1) % eval_step == 0:\n logger.info(\n 'Ep: {}, Stp: {}/{}, usd_t={:.2f}s, loss={:.6f}'.\n format(epoch, step + 1, len(train_batches), time.\n time() - start_time, tr_loss / nb_tr_steps))\n predict_for_metrics = []\n cur_train_mean_loss = {}\n for key in cur_train_loss:\n cur_train_mean_loss[f'train_{key}'] = cur_train_loss[\n key] / nb_tr_steps\n preds, result, scores = evaluate(model, device,\n eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids,\n eval_relations_sequence_labels_ids, label2id,\n cur_train_mean_loss=cur_train_mean_loss, logger=\n eval_logger)\n result['global_step'] = global_step\n result['epoch'] = epoch\n result['learning_rate'] = lr\n result['batch_size'] = (args.train_batch_size * args.\n gradient_accumulation_steps)\n for key, value in result.items():\n dev_writer.add_scalar(key, value, global_step)\n for key, value in cur_train_mean_loss.items():\n train_writer.add_scalar(f'running_train_{key}',\n value, global_step)\n logger.info('First 20 predictions:')\n for sent_type_pred, sent_type_label in zip(preds[\n 'sent_type'][:20], eval_sent_type_labels_ids.numpy(\n )[:20]):\n sign = (u'✓' if sent_type_pred == sent_type_label else\n u'✘')\n logger.info('pred = %s, label = %s %s' % (id2label[\n 'sent_type'][sent_type_pred], id2label[\n 'sent_type'][sent_type_label], sign))\n for eval_metric in eval_metrics:\n if result[eval_metric] > best_result[eval_metric]:\n best_result[eval_metric] = result[eval_metric]\n logger.info(\n '!!! Best dev %s (lr=%s, epoch=%d): %.2f' %\n (eval_metric, str(lr), epoch, result[\n eval_metric] * 100.0))\n predict_for_metrics.append(eval_metric)\n for metric_id, eval_metric in tqdm(enumerate(\n predict_for_metrics), total=len(predict_for_metrics\n ), desc='writing predictions ... 
'):\n dest_file = f'dev_best_{eval_metric}'\n write_predictions(args, eval_new_examples,\n eval_features, preds, scores, dest_file,\n label2id=label2id, id2label=id2label, metrics=\n result)\n if metric_id == 0:\n test_preds, test_result, test_scores = evaluate(\n model, device, test_dataloader,\n test_sent_type_labels_ids,\n test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids,\n label2id, cur_train_mean_loss=None, logger=None\n )\n output_model_file = os.path.join(args.\n output_dir,\n f'best_{eval_metric}_{WEIGHTS_NAME}')\n save_model(args, model, tokenizer,\n output_model_file)\n for metric in predict_for_metrics[1:]:\n dest_model_path = os.path.join(args.\n output_dir, f'best_{metric}_{WEIGHTS_NAME}'\n )\n os.system(\n f'cp {output_model_file} {dest_model_path}'\n )\n dest_file = f'test_best_{eval_metric}'\n write_predictions(args, test_new_examples,\n test_features, test_preds, test_scores,\n dest_file, label2id=label2id, id2label=id2label,\n metrics=test_result)\n if args.log_train_metrics:\n preds, result, scores = evaluate(model, device,\n train_dataloader, sent_type_ids, tags_sequence_ids,\n relations_sequence_ids, label2id, logger=logger,\n skip_every_n_examples=args.skip_every_n_examples)\n result['global_step'] = global_step\n result['epoch'] = epoch\n result['learning_rate'] = lr\n result['batch_size'\n ] = args.train_batch_size * args.gradient_accumulation_steps\n for key, value in result.items():\n train_writer.add_scalar(key, value, global_step)\n if args.do_eval:\n test_files = os.path.join(args.data_dir, 'test.json'\n ) if args.test_file == '' else args.test_file\n for test_file in test_files.split('8'):\n test_examples = processor.get_test_examples(test_file)\n test_features, test_new_examples = (model.\n convert_examples_to_features(test_examples, label2id, args.\n max_seq_length, tokenizer, logger, args.sequence_mode,\n context_mode=args.context_mode))\n logger.info('***** Test *****')\n logger.info(' Num examples = %d', len(test_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (test_dataloader, test_sent_type_labels_ids,\n test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids) = (\n get_dataloader_and_tensors(test_features, args.eval_batch_size)\n )\n preds, result, scores = evaluate(model, device, test_dataloader,\n test_sent_type_labels_ids, test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids, label2id,\n compute_metrics=False)\n dest_file = args.model_prefix + test_file.split('/')[-1].replace(\n '.json', '')\n write_predictions(args, test_new_examples, test_features, preds,\n scores, dest_file, label2id=label2id, id2label=id2label,\n metrics=result)\n if rm_model:\n shutil.rmtree(dest_tmp_model_path)\n\n\ndef save_model(args, model, tokenizer, output_model_file):\n start = time.time()\n model_to_save = model.module if hasattr(model, 'module') else model\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n print(\n f'model saved in {time.time() - start} seconds to {output_model_file}')\n\n\ndef write_predictions(args, examples, features, preds, scores, dest_file,\n label2id, id2label, metrics=None):\n aggregated_results = {}\n orig_positions_map = [ex.orig_positions_map for ex in features]\n neg_label_mapper = {'tags_sequence': 'O', 'relations_sequence': '0'}\n for task in ['tags_sequence', 'relations_sequence']:\n 
aggregated_results[task] = [(list(pred[orig_positions]) + [label2id\n [task][neg_label_mapper[task]]] * (len(ex.tokens) - len(\n orig_positions))) for pred, orig_positions, ex in zip(preds[\n task], orig_positions_map, examples)]\n aggregated_results[f'{task}_scores'] = [(list(score[orig_positions]\n ) + [0.999] * (len(ex.tokens) - len(orig_positions))) for score,\n orig_positions, ex in zip(scores[task], orig_positions_map,\n examples)]\n prediction_results = {'idx': [ex.guid for ex in examples], 'tokens': [\n ' '.join(ex.tokens) for ex in examples], 'sent_type_label': [ex.\n sent_type for ex in examples], 'sent_type_pred': [id2label[\n 'sent_type'][x] for x in preds['sent_type']], 'sent_type_scores': [\n str(score) for score in scores['sent_type']], 'sent_start': [ex.\n sent_start for ex in examples], 'sent_end': [ex.sent_end for ex in\n examples], 'tags_sequence_labels': [' '.join(ex.tags_sequence) for\n ex in examples], 'tags_sequence_pred': [' '.join([(id2label[\n 'tags_sequence'][x] if x != 0 else 'O') for x in sent]) for sent in\n aggregated_results['tags_sequence']], 'tags_sequence_scores': [' '.\n join([str(score) for score in sent]) for sent in aggregated_results\n ['tags_sequence_scores']], 'tags_ids': [' '.join(ex.tags_ids) for\n ex in examples], 'relations_sequence_labels': [' '.join(ex.\n relations_sequence) for ex in examples], 'relations_sequence_pred':\n [' '.join([(id2label['relations_sequence'][x] if x != 0 else '0') for\n x in sent]) for sent in aggregated_results['relations_sequence']],\n 'relations_sequence_scores': [' '.join([str(score) for score in\n sent]) for sent in aggregated_results['relations_sequence_scores']],\n 'subj_start': [ex.subj_start for ex in examples], 'subj_end': [ex.\n subj_end for ex in examples], 'infile_offsets': [' '.join([str(\n offset) for offset in ex.infile_offsets]) for ex in examples],\n 'start_char': [' '.join(ex.start_char) for ex in examples],\n 'end_char': [' '.join(ex.end_char) for ex in examples], 'source': [\n ex.source for ex in examples]}\n prediction_results = pd.DataFrame(prediction_results)\n prediction_results.to_csv(os.path.join(args.output_dir,\n f'{dest_file}.tsv'), sep='\\t', index=False)\n if metrics is not None:\n with open(os.path.join(args.output_dir,\n f'{dest_file}_eval_results.txt'), 'w') as f:\n for key in sorted(metrics.keys()):\n f.write('%s = %s\\n' % (key, str(metrics[key])))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--test_file', default='', type=str, required=False)\n parser.add_argument('--model', default='bert-large-uncased', type=str,\n required=True)\n parser.add_argument('--data_dir', default='data', type=str, required=\n True, help=\n 'The input data dir. 
Should contain the .json files for the task.')\n parser.add_argument('--output_dir', default=None, type=str, required=\n True, help=\n 'The output directory where the model predictions and checkpoints will be written.'\n )\n parser.add_argument('--eval_per_epoch', default=4, type=int, help=\n 'How many times to do validation on dev set per epoch')\n parser.add_argument('--max_seq_length', default=256, type=int, help=\n \"\"\"The maximum total input sequence length after WordPiece tokenization.\nSequences longer than this will be truncated, and sequences shorter\nthan this will be padded.\"\"\"\n )\n parser.add_argument('--do_train', action='store_true', help=\n 'Whether to run training.')\n parser.add_argument('--train_mode', type=str, default='random_sorted',\n choices=['random', 'sorted', 'random_sorted'])\n parser.add_argument('--do_validate', action='store_true', help=\n 'Whether to run validation on dev set.')\n parser.add_argument('--do_eval', action='store_true', help=\n 'Whether to run eval on the test set.')\n parser.add_argument('--train_batch_size', default=32, type=int, help=\n 'Total batch size for training.')\n parser.add_argument('--eval_batch_size', default=8, type=int, help=\n 'Total batch size for eval.')\n parser.add_argument('--eval_metrics', default='+'.join([\n 'sent_type_1_f1-score', 'tags_sequence_macro-avg_f1-score',\n 'relations_sequence_macro-avg_f1-score']), type=str)\n parser.add_argument('--learning_rate', default=1e-05, type=float, help=\n 'The initial learning rate for Adam.')\n parser.add_argument('--num_train_epochs', default=6.0, type=float, help\n ='Total number of training epochs to perform.')\n parser.add_argument('--warmup_proportion', default=0.1, type=float,\n help=\n \"\"\"Proportion of training to perform linear learning rate warmup.\nE.g., 0.1 = 10%% of training.\"\"\"\n )\n parser.add_argument('--max_grad_norm', default=1.0, type=float, help=\n 'maximal gradient norm')\n parser.add_argument('--sent_type_clf_weight', default=1.0, type=float,\n help='the weight of task 1')\n parser.add_argument('--tags_sequence_clf_weight', default=1.0, type=\n float, help='The weight of task 2')\n parser.add_argument('--relations_sequence_clf_weight', default=1.0,\n type=float, help='The weight of task 3')\n parser.add_argument('--weight_decay', default=0.1, type=float, help=\n 'weight_decay coefficient for regularization')\n parser.add_argument('--dropout', default=0.1, type=float, help=\n 'dropout rate')\n parser.add_argument('--no_cuda', action='store_true', help=\n 'Whether not to use CUDA when available')\n parser.add_argument('--seed', type=int, default=42, help=\n 'random seed for initialization')\n parser.add_argument('--gradient_accumulation_steps', type=int, default=\n 8, help=\n 'Number of updates steps to accumulate before performing a backward/update pass.'\n )\n parser.add_argument('--filter_task_3', action='store_true', help=\n 'exclude task 3 from training')\n parser.add_argument('--filter_task_1', action='store_true', help=\n 'exclude task 1 from training')\n parser.add_argument('--subtokens_pooling_type', type=str, default=\n 'first', help='pooling mode in bert-ner, one of avg or first')\n parser.add_argument('--sequence_mode', type=str, default='not-all',\n help='train to predict for all subtokens or notall or not-all')\n parser.add_argument('--context_mode', type=str, default='full', help=\n 'context for task 1: one from center, full, left, right')\n parser.add_argument('--lr_schedule', type=str, default='linear_warmup',\n help='lr adjustment 
schedule')\n    parser.add_argument('--log_train_metrics', action='store_true', help=\n        'compute metrics for train set too')\n    parser.add_argument('--threshold', type=float, default=0.3, help=\n        'threshold for best models to save')\n    parser.add_argument('--output_dirs_to_exclude', type=str, default='',\n        help='path to json file containing list of output' +\n        ' dirs to exclude from training')\n    parser.add_argument('--skip_every_n_examples', type=int, default=30,\n        help='number of examples in train set to skip in evaluating metrics')\n    parser.add_argument('--model_prefix', type=str, default=\n        'best_sent_type_1_f1-score_', help='prefix of the model weight')\n    parsed_args = parser.parse_args()\n    main(parsed_args)\n",
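The `compute_all_metrics` helper in these rows flattens several sklearn `classification_report` dictionaries into one flat result dict with keys like `sent_type_1_f1-score`. A self-contained sketch of that flattening step, assuming only sklearn; the `flatten_report` name is mine, not the row's:

```python
from sklearn.metrics import classification_report

def flatten_report(y_true, y_pred, labels, prefix):
    """Flatten sklearn's per-class report into one {name: score} dict,
    mirroring how compute_all_metrics builds its result keys."""
    report = classification_report(y_true, y_pred, labels=labels, output_dict=True)
    flat = {}
    for name, per_class in report.items():
        if not isinstance(per_class, dict):  # 'accuracy' is a bare float; skip it
            continue
        for metric, value in per_class.items():  # precision/recall/f1-score/support
            flat[f"{prefix}_{name.replace(' ', '-')}_{metric}"] = round(value, 6)
    return flat

print(flatten_report([0, 1, 1, 0], [0, 1, 0, 0], labels=[0, 1], prefix="sent_type"))
```

Replacing spaces with hyphens in the class names keeps aggregate rows such as `macro avg` usable as single tokens in log lines and TensorBoard tags, which matches the `macro-avg` keys seen above.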
"<import token>\n<code token>\n<assignment token>\n\n\ndef compute_all_metrics(sent_type_labels, sent_type_preds,\n tags_sequence_labels, tags_sequence_preds, relations_sequence_labels,\n relations_sequence_preds, label2id, loss_info=None, logger=None):\n eval_tags_sequence_labels = [label2id['tags_sequence'][lab] for lab in\n EVAL_TAGS]\n eval_relations_sequence_labels = [label2id['relations_sequence'][lab] for\n lab in EVAL_RELATIONS]\n task_1_report = classification_report(sent_type_labels, sent_type_preds,\n labels=[0, 1], output_dict=True)\n task_2_report = classification_report(tags_sequence_labels,\n tags_sequence_preds, labels=eval_tags_sequence_labels, output_dict=True\n )\n task_3_report = classification_report(relations_sequence_labels,\n relations_sequence_preds, labels=eval_relations_sequence_labels,\n output_dict=True)\n result = {}\n for x in ['0', '1', 'weighted avg', 'macro avg']:\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f\"sent_type_{x.replace(' ', '-')}_{metrics}\"] = round(\n task_1_report[x][metrics], 6)\n id2label = {val: key for key, val in label2id['tags_sequence'].items()}\n id2label['weighted avg'] = 'weighted-avg'\n id2label['macro avg'] = 'macro-avg'\n for x in (eval_tags_sequence_labels + ['weighted avg', 'macro avg']):\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f'tags_sequence_{id2label[x]}_{metrics}'] = round(\n task_2_report[str(x)][metrics], 6)\n id2label = {val: key for key, val in label2id['relations_sequence'].items()\n }\n id2label['weighted avg'] = 'weighted-avg'\n id2label['macro avg'] = 'macro-avg'\n for x in (eval_relations_sequence_labels + ['weighted avg', 'macro avg']):\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f'relations_sequence_{id2label[x]}_{metrics}'] = round(\n task_3_report[str(x)][metrics], 6)\n if logger is not None:\n logger.info('=====================================')\n for key in sorted(result.keys()):\n logger.info(' %s = %s', key, str(result[key]))\n if loss_info is not None:\n for key in sorted(loss_info.keys()):\n logger.info(' %s = %s', key, str(loss_info[key]))\n return result\n\n\ndef evaluate(model, device, eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids, eval_relations_sequence_labels_ids,\n label2id, compute_metrics=True, verbose=False, cur_train_mean_loss=None,\n logger=None, skip_every_n_examples=1):\n model.eval()\n num_sent_type_labels = model.num_sent_type_labels\n num_tags_sequence_labels = model.num_tags_sequence_labels\n num_relations_sequence_labels = model.num_relations_sequence_labels\n sent_type_clf_weight = model.sent_type_clf_weight\n tags_sequence_clf_weight = model.tags_sequence_clf_weight\n relations_sequence_clf_weight = model.relations_sequence_clf_weight\n eval_loss = defaultdict(float)\n nb_eval_steps = 0\n preds = defaultdict(list)\n for batch_id, batch in enumerate(tqdm(eval_dataloader, total=len(\n eval_dataloader), desc='validation ... 
')):\n if skip_every_n_examples != 1 and (batch_id + 1\n ) % skip_every_n_examples != 1:\n continue\n batch = tuple([elem.to(device) for elem in batch])\n (input_ids, input_mask, segment_ids, sent_type_labels_ids,\n tags_sequence_labels_ids, relations_sequence_labels_ids,\n token_valid_pos_ids) = batch\n with torch.no_grad():\n outputs, loss = model(input_ids=input_ids, token_type_ids=\n segment_ids, attention_mask=input_mask, sent_type_labels=\n sent_type_labels_ids, tags_sequence_labels=\n tags_sequence_labels_ids, relations_sequence_labels=\n relations_sequence_labels_ids, token_valid_pos_ids=\n token_valid_pos_ids, device=device)\n (sent_type_logits, tags_sequence_logits, relations_sequence_logits\n ) = outputs[:3]\n if compute_metrics:\n eval_loss['sent_type_loss'] += loss['sent_type_loss'].mean().item()\n eval_loss['tags_sequence_loss'] += loss['tags_sequence_loss'].mean(\n ).item()\n eval_loss['relations_sequence_loss'] += loss[\n 'relations_sequence_loss'].mean().item()\n eval_loss['weighted_loss'] += loss['weighted_loss'].mean().item()\n nb_eval_steps += 1\n preds['sent_type'].append(sent_type_logits.detach().cpu().numpy())\n preds['tags_sequence'].append(tags_sequence_logits.detach().cpu().\n numpy())\n preds['relations_sequence'].append(relations_sequence_logits.detach\n ().cpu().numpy())\n preds['sent_type'] = np.concatenate(preds['sent_type'], axis=0)\n preds['tags_sequence'] = np.concatenate(preds['tags_sequence'], axis=0)\n preds['relations_sequence'] = np.concatenate(preds['relations_sequence'\n ], axis=0)\n scores = {}\n for key in preds:\n scores[key] = softmax(preds[key], axis=-1).max(axis=-1)\n preds[key] = preds[key].argmax(axis=-1)\n if compute_metrics:\n for key in eval_loss:\n eval_loss[key] = eval_loss[key] / nb_eval_steps\n if cur_train_mean_loss is not None:\n eval_loss.update(cur_train_mean_loss)\n result = compute_all_metrics(eval_sent_type_labels_ids.numpy(),\n preds['sent_type'], np.array([x for y in\n eval_tags_sequence_labels_ids.numpy() for x in y]), np.array([x for\n y in preds['tags_sequence'] for x in y]), np.array([x for y in\n eval_relations_sequence_labels_ids.numpy() for x in y]), np.\n array([x for y in preds['relations_sequence'] for x in y]),\n label2id, loss_info=eval_loss, logger=logger)\n else:\n result = {}\n for key in eval_loss:\n result[key] = eval_loss[key]\n model.train()\n return preds, result, scores\n\n\ndef main(args):\n if os.path.exists(args.output_dir) and args.do_train:\n from glob import glob\n tsv_files = glob(os.path.join(args.output_dir, '*best*tsv'))\n if tsv_files:\n print('already computed: skipping ...')\n return\n else:\n print(\n f'already existing {args.output_dir}. but without weight file and tsv files ...'\n )\n os.system(f'rm -r {args.output_dir}')\n assert args.context_mode in ['full', 'center', 'left', 'right']\n source_model = os.path.join(args.output_dir,\n f'{args.model_prefix}pytorch_model.bin')\n dest_model = os.path.join(args.output_dir, 'pytorch_model.bin')\n rm_model = False\n if args.do_eval:\n if not os.path.exists(source_model):\n print(f'returning ... 
not found {source_model}')\n return\n if source_model != dest_model:\n rm_model = True\n dest_tmp_model_path = tempfile.mkdtemp()\n os.system(\n f\"cp {source_model} {os.path.join(dest_tmp_model_path, 'pytorch_model.bin')}\"\n )\n os.system(\n f\"cp {os.path.join(args.output_dir, 'config.json')} {os.path.join(dest_tmp_model_path, 'config.json')}\"\n )\n os.system(\n f\"cp {os.path.join(args.output_dir, 'vocab.txt')} {os.path.join(dest_tmp_model_path, 'vocab.txt')}\"\n )\n else:\n dest_tmp_model_path = args.output_dir\n device = torch.device('cuda' if torch.cuda.is_available() and not args.\n no_cuda else 'cpu')\n n_gpu = torch.cuda.device_count()\n if args.gradient_accumulation_steps < 1:\n raise ValueError('gradient_accumulation_steps parameter should be >= 1'\n )\n args.train_batch_size = (args.train_batch_size // args.\n gradient_accumulation_steps)\n if args.do_train:\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n if not args.do_train and not args.do_eval:\n raise ValueError(\n 'At least one of `do_train` or `do_eval` must be True.')\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n elif args.do_train or args.do_validate:\n raise ValueError(args.output_dir, 'output_dir already exists')\n suffix = datetime.now().isoformat().replace('-', '_').replace(':', '_'\n ).split('.')[0].replace('T', '-')\n if args.do_train:\n train_writer = SummaryWriter(log_dir=os.path.join('tensorboard',\n args.output_dir, 'train'))\n dev_writer = SummaryWriter(log_dir=os.path.join('tensorboard', args\n .output_dir, 'dev'))\n logger.addHandler(logging.FileHandler(os.path.join(args.output_dir,\n f'train_{suffix}.log'), 'w'))\n eval_logger.addHandler(logging.FileHandler(os.path.join(args.\n output_dir, f'scores_{suffix}.log'), 'w'))\n else:\n logger.addHandler(logging.FileHandler(os.path.join(args.output_dir,\n f'eval_{suffix}.log'), 'w'))\n logger.info(args)\n logger.info('device: {}, n_gpu: {}'.format(device, n_gpu))\n processor = DataProcessor(filter_task_1=args.filter_task_1,\n filter_task_3=args.filter_task_3)\n eval_metrics = {eval_metric: (True) for eval_metric in args.\n eval_metrics.split('+')}\n if args.filter_task_1 and args.do_train:\n assert args.sent_type_clf_weight == 0.0\n eval_metrics.pop('sent_type_1_f1-score')\n if args.filter_task_3 and args.do_train:\n assert args.relations_sequence_clf_weight == 0.0\n eval_metrics.pop('relations_sequence_macro-avg_f1-score')\n if (args.sent_type_clf_weight == 0.0 and 'sent_type_1_f1-score' in\n eval_metrics):\n eval_metrics.pop('sent_type_1_f1-score')\n if (args.tags_sequence_clf_weight == 0.0 and \n 'tags_sequence_macro-avg_f1-score' in eval_metrics):\n eval_metrics.pop('tags_sequence_macro-avg_f1-score')\n if (args.relations_sequence_clf_weight == 0.0 and \n 'relations_sequence_macro-avg_f1-score' in eval_metrics):\n eval_metrics.pop('relations_sequence_macro-avg_f1-score')\n assert len(eval_metrics) > 0, 'inconsistent train params'\n if args.context_mode != 'full':\n keys = list(eval_metrics.keys())\n for key in keys:\n if key != 'sent_type_1_f1-score':\n eval_metrics.pop(key)\n assert 'sent_type_1_f1-score' in eval_metrics\n sent_type_labels_list = processor.get_sent_type_labels(args.data_dir,\n logger)\n tags_sequence_labels_list = processor.get_sequence_labels(args.data_dir,\n logger=logger, sequence_type='tags_sequence')\n relations_sequence_labels_list = processor.get_sequence_labels(args.\n data_dir, logger=logger, 
sequence_type='relations_sequence')\n label2id = {'sent_type': {label: i for i, label in enumerate(\n sent_type_labels_list)}, 'tags_sequence': {label: i for i, label in\n enumerate(tags_sequence_labels_list, 1)}, 'relations_sequence': {\n label: i for i, label in enumerate(relations_sequence_labels_list, 1)}}\n id2label = {'sent_type': {i: label for i, label in enumerate(\n sent_type_labels_list)}, 'tags_sequence': {i: label for i, label in\n enumerate(tags_sequence_labels_list, 1)}, 'relations_sequence': {i:\n label for i, label in enumerate(relations_sequence_labels_list, 1)}}\n num_sent_type_labels = len(sent_type_labels_list)\n num_tags_sequence_labels = len(tags_sequence_labels_list) + 1\n num_relations_sequence_labels = len(relations_sequence_labels_list) + 1\n do_lower_case = 'uncased' in args.model\n tokenizer = tokenizers[args.model].from_pretrained(args.model,\n do_lower_case=do_lower_case)\n model_name = args.model\n if args.do_train:\n config = configs[args.model]\n config = config.from_pretrained(args.model, hidden_dropout_prob=\n args.dropout)\n model = models[model_name].from_pretrained(args.model, cache_dir=\n str(PYTORCH_PRETRAINED_BERT_CACHE), num_sent_type_labels=\n num_sent_type_labels, num_tags_sequence_labels=\n num_tags_sequence_labels, num_relations_sequence_labels=\n num_relations_sequence_labels, sent_type_clf_weight=args.\n sent_type_clf_weight, tags_sequence_clf_weight=args.\n tags_sequence_clf_weight, relations_sequence_clf_weight=args.\n relations_sequence_clf_weight, pooling_type=args.\n subtokens_pooling_type, config=config)\n print('task weights:', model.sent_type_clf_weight, model.\n tags_sequence_clf_weight, model.relations_sequence_clf_weight)\n else:\n model = models[model_name].from_pretrained(dest_tmp_model_path,\n num_sent_type_labels=num_sent_type_labels,\n num_tags_sequence_labels=num_tags_sequence_labels,\n num_relations_sequence_labels=num_relations_sequence_labels,\n sent_type_clf_weight=args.sent_type_clf_weight,\n tags_sequence_clf_weight=args.tags_sequence_clf_weight,\n relations_sequence_clf_weight=args.\n relations_sequence_clf_weight, pooling_type=args.\n subtokens_pooling_type)\n model.to(device)\n eval_examples = processor.get_dev_examples(args.data_dir)\n eval_features, eval_new_examples = model.convert_examples_to_features(\n eval_examples, label2id, args.max_seq_length, tokenizer, logger,\n args.sequence_mode, context_mode=args.context_mode)\n logger.info('***** Dev *****')\n logger.info(' Num examples = %d', len(eval_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids, eval_relations_sequence_labels_ids\n ) = get_dataloader_and_tensors(eval_features, args.eval_batch_size)\n if not args.do_eval:\n test_file = os.path.join(args.data_dir, 'test.json'\n ) if args.test_file == '' else args.test_file\n test_examples = processor.get_test_examples(test_file)\n test_features, test_new_examples = model.convert_examples_to_features(\n test_examples, label2id, args.max_seq_length, tokenizer, logger,\n args.sequence_mode, context_mode=args.context_mode)\n logger.info('***** Test *****')\n logger.info(' Num examples = %d', len(test_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (test_dataloader, test_sent_type_labels_ids,\n test_tags_sequence_labels_ids, test_relations_sequence_labels_ids\n ) = (get_dataloader_and_tensors(test_features, args.\n eval_batch_size))\n if args.do_train:\n train_examples = 
processor.get_train_examples(args.data_dir)\n train_features, _ = model.convert_examples_to_features(train_examples,\n label2id, args.max_seq_length, tokenizer, logger, args.\n sequence_mode, context_mode=args.context_mode)\n if args.train_mode == 'sorted' or args.train_mode == 'random_sorted':\n train_features = sorted(train_features, key=lambda f: np.sum(f.\n input_mask))\n else:\n random.shuffle(train_features)\n (train_dataloader, sent_type_ids, tags_sequence_ids,\n relations_sequence_ids) = (get_dataloader_and_tensors(\n train_features, args.train_batch_size))\n train_batches = [batch for batch in train_dataloader]\n num_train_optimization_steps = len(train_dataloader\n ) // args.gradient_accumulation_steps * args.num_train_epochs\n warmup_steps = int(args.warmup_proportion *\n num_train_optimization_steps)\n logger.info('***** Training *****')\n logger.info(' Num examples = %d', len(train_examples))\n logger.info(' Batch size = %d', args.train_batch_size)\n logger.info(' Num steps = %d', num_train_optimization_steps)\n best_result = defaultdict(float)\n for eval_metric in eval_metrics:\n best_result[eval_metric] = args.threshold\n if eval_metric.startswith('sent_type'):\n best_result[eval_metric] += 0.2\n print('best results thresholds:')\n print(best_result)\n eval_step = max(1, len(train_batches) // args.eval_per_epoch)\n lr = float(args.learning_rate)\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [{'params': [param for name, param in\n param_optimizer if not any(nd in name for nd in no_decay)],\n 'weight_decay': float(args.weight_decay)}, {'params': [param for\n name, param in param_optimizer if any(nd in name for nd in\n no_decay)], 'weight_decay': 0.0}]\n optimizer = AdamW(optimizer_grouped_parameters, lr=lr)\n if args.lr_schedule == 'constant_warmup':\n print('lr schedule = constant_warmup')\n scheduler = get_constant_schedule_with_warmup(optimizer,\n num_warmup_steps=warmup_steps)\n else:\n scheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=warmup_steps, num_training_steps=\n num_train_optimization_steps)\n start_time = time.time()\n global_step = 0\n for epoch in range(1, 1 + int(args.num_train_epochs)):\n tr_loss = 0\n nb_tr_examples = 0\n nb_tr_steps = 0\n cur_train_loss = defaultdict(float)\n model.train()\n logger.info('Start epoch #{} (lr = {})...'.format(epoch, lr))\n if (args.train_mode == 'random' or args.train_mode ==\n 'random_sorted'):\n random.shuffle(train_batches)\n for step, batch in enumerate(tqdm(train_batches, total=len(\n train_batches), desc='training ... 
')):\n batch = tuple(t.to(device) for t in batch)\n (input_ids, input_mask, segment_ids, sent_type_labels_ids,\n tags_sequence_labels_ids, relations_sequence_labels_ids,\n token_valid_pos_ids) = batch\n train_loss = model(input_ids=input_ids, token_type_ids=\n segment_ids, attention_mask=input_mask,\n sent_type_labels=sent_type_labels_ids,\n tags_sequence_labels=tags_sequence_labels_ids,\n relations_sequence_labels=relations_sequence_labels_ids,\n token_valid_pos_ids=token_valid_pos_ids, return_outputs\n =False, device=device)\n for key in train_loss:\n cur_train_loss[key] += train_loss[key].mean().item()\n loss_to_optimize = train_loss['weighted_loss']\n if n_gpu > 1:\n loss_to_optimize = loss_to_optimize.mean()\n if args.gradient_accumulation_steps > 1:\n loss_to_optimize = (loss_to_optimize / args.\n gradient_accumulation_steps)\n loss_to_optimize.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.\n max_grad_norm)\n tr_loss += loss_to_optimize.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n global_step += 1\n if args.do_validate and (step + 1) % eval_step == 0:\n logger.info(\n 'Ep: {}, Stp: {}/{}, usd_t={:.2f}s, loss={:.6f}'.\n format(epoch, step + 1, len(train_batches), time.\n time() - start_time, tr_loss / nb_tr_steps))\n predict_for_metrics = []\n cur_train_mean_loss = {}\n for key in cur_train_loss:\n cur_train_mean_loss[f'train_{key}'] = cur_train_loss[\n key] / nb_tr_steps\n preds, result, scores = evaluate(model, device,\n eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids,\n eval_relations_sequence_labels_ids, label2id,\n cur_train_mean_loss=cur_train_mean_loss, logger=\n eval_logger)\n result['global_step'] = global_step\n result['epoch'] = epoch\n result['learning_rate'] = lr\n result['batch_size'] = (args.train_batch_size * args.\n gradient_accumulation_steps)\n for key, value in result.items():\n dev_writer.add_scalar(key, value, global_step)\n for key, value in cur_train_mean_loss.items():\n train_writer.add_scalar(f'running_train_{key}',\n value, global_step)\n logger.info('First 20 predictions:')\n for sent_type_pred, sent_type_label in zip(preds[\n 'sent_type'][:20], eval_sent_type_labels_ids.numpy(\n )[:20]):\n sign = (u'✓' if sent_type_pred == sent_type_label else\n u'✘')\n logger.info('pred = %s, label = %s %s' % (id2label[\n 'sent_type'][sent_type_pred], id2label[\n 'sent_type'][sent_type_label], sign))\n for eval_metric in eval_metrics:\n if result[eval_metric] > best_result[eval_metric]:\n best_result[eval_metric] = result[eval_metric]\n logger.info(\n '!!! Best dev %s (lr=%s, epoch=%d): %.2f' %\n (eval_metric, str(lr), epoch, result[\n eval_metric] * 100.0))\n predict_for_metrics.append(eval_metric)\n for metric_id, eval_metric in tqdm(enumerate(\n predict_for_metrics), total=len(predict_for_metrics\n ), desc='writing predictions ... 
'):\n dest_file = f'dev_best_{eval_metric}'\n write_predictions(args, eval_new_examples,\n eval_features, preds, scores, dest_file,\n label2id=label2id, id2label=id2label, metrics=\n result)\n if metric_id == 0:\n test_preds, test_result, test_scores = evaluate(\n model, device, test_dataloader,\n test_sent_type_labels_ids,\n test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids,\n label2id, cur_train_mean_loss=None, logger=None\n )\n output_model_file = os.path.join(args.\n output_dir,\n f'best_{eval_metric}_{WEIGHTS_NAME}')\n save_model(args, model, tokenizer,\n output_model_file)\n for metric in predict_for_metrics[1:]:\n dest_model_path = os.path.join(args.\n output_dir, f'best_{metric}_{WEIGHTS_NAME}'\n )\n os.system(\n f'cp {output_model_file} {dest_model_path}'\n )\n dest_file = f'test_best_{eval_metric}'\n write_predictions(args, test_new_examples,\n test_features, test_preds, test_scores,\n dest_file, label2id=label2id, id2label=id2label,\n metrics=test_result)\n if args.log_train_metrics:\n preds, result, scores = evaluate(model, device,\n train_dataloader, sent_type_ids, tags_sequence_ids,\n relations_sequence_ids, label2id, logger=logger,\n skip_every_n_examples=args.skip_every_n_examples)\n result['global_step'] = global_step\n result['epoch'] = epoch\n result['learning_rate'] = lr\n result['batch_size'\n ] = args.train_batch_size * args.gradient_accumulation_steps\n for key, value in result.items():\n train_writer.add_scalar(key, value, global_step)\n if args.do_eval:\n test_files = os.path.join(args.data_dir, 'test.json'\n ) if args.test_file == '' else args.test_file\n for test_file in test_files.split('8'):\n test_examples = processor.get_test_examples(test_file)\n test_features, test_new_examples = (model.\n convert_examples_to_features(test_examples, label2id, args.\n max_seq_length, tokenizer, logger, args.sequence_mode,\n context_mode=args.context_mode))\n logger.info('***** Test *****')\n logger.info(' Num examples = %d', len(test_examples))\n logger.info(' Batch size = %d', args.eval_batch_size)\n (test_dataloader, test_sent_type_labels_ids,\n test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids) = (\n get_dataloader_and_tensors(test_features, args.eval_batch_size)\n )\n preds, result, scores = evaluate(model, device, test_dataloader,\n test_sent_type_labels_ids, test_tags_sequence_labels_ids,\n test_relations_sequence_labels_ids, label2id,\n compute_metrics=False)\n dest_file = args.model_prefix + test_file.split('/')[-1].replace(\n '.json', '')\n write_predictions(args, test_new_examples, test_features, preds,\n scores, dest_file, label2id=label2id, id2label=id2label,\n metrics=result)\n if rm_model:\n shutil.rmtree(dest_tmp_model_path)\n\n\ndef save_model(args, model, tokenizer, output_model_file):\n start = time.time()\n model_to_save = model.module if hasattr(model, 'module') else model\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n print(\n f'model saved in {time.time() - start} seconds to {output_model_file}')\n\n\ndef write_predictions(args, examples, features, preds, scores, dest_file,\n label2id, id2label, metrics=None):\n aggregated_results = {}\n orig_positions_map = [ex.orig_positions_map for ex in features]\n neg_label_mapper = {'tags_sequence': 'O', 'relations_sequence': '0'}\n for task in ['tags_sequence', 'relations_sequence']:\n 
aggregated_results[task] = [(list(pred[orig_positions]) + [label2id\n [task][neg_label_mapper[task]]] * (len(ex.tokens) - len(\n orig_positions))) for pred, orig_positions, ex in zip(preds[\n task], orig_positions_map, examples)]\n aggregated_results[f'{task}_scores'] = [(list(score[orig_positions]\n ) + [0.999] * (len(ex.tokens) - len(orig_positions))) for score,\n orig_positions, ex in zip(scores[task], orig_positions_map,\n examples)]\n prediction_results = {'idx': [ex.guid for ex in examples], 'tokens': [\n ' '.join(ex.tokens) for ex in examples], 'sent_type_label': [ex.\n sent_type for ex in examples], 'sent_type_pred': [id2label[\n 'sent_type'][x] for x in preds['sent_type']], 'sent_type_scores': [\n str(score) for score in scores['sent_type']], 'sent_start': [ex.\n sent_start for ex in examples], 'sent_end': [ex.sent_end for ex in\n examples], 'tags_sequence_labels': [' '.join(ex.tags_sequence) for\n ex in examples], 'tags_sequence_pred': [' '.join([(id2label[\n 'tags_sequence'][x] if x != 0 else 'O') for x in sent]) for sent in\n aggregated_results['tags_sequence']], 'tags_sequence_scores': [' '.\n join([str(score) for score in sent]) for sent in aggregated_results\n ['tags_sequence_scores']], 'tags_ids': [' '.join(ex.tags_ids) for\n ex in examples], 'relations_sequence_labels': [' '.join(ex.\n relations_sequence) for ex in examples], 'relations_sequence_pred':\n [' '.join([(id2label['relations_sequence'][x] if x != 0 else '0') for\n x in sent]) for sent in aggregated_results['relations_sequence']],\n 'relations_sequence_scores': [' '.join([str(score) for score in\n sent]) for sent in aggregated_results['relations_sequence_scores']],\n 'subj_start': [ex.subj_start for ex in examples], 'subj_end': [ex.\n subj_end for ex in examples], 'infile_offsets': [' '.join([str(\n offset) for offset in ex.infile_offsets]) for ex in examples],\n 'start_char': [' '.join(ex.start_char) for ex in examples],\n 'end_char': [' '.join(ex.end_char) for ex in examples], 'source': [\n ex.source for ex in examples]}\n prediction_results = pd.DataFrame(prediction_results)\n prediction_results.to_csv(os.path.join(args.output_dir,\n f'{dest_file}.tsv'), sep='\\t', index=False)\n if metrics is not None:\n with open(os.path.join(args.output_dir,\n f'{dest_file}_eval_results.txt'), 'w') as f:\n for key in sorted(metrics.keys()):\n f.write('%s = %s\\n' % (key, str(metrics[key])))\n\n\n<code token>\n",
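`write_predictions` re-aligns per-subtoken predictions to the original tokens: it keeps the prediction at each recorded original position and pads the tail with the task's negative label (`O` for tags, `0` for relations). A tiny sketch of that re-alignment; the `expand_to_tokens` helper is hypothetical, introduced only to isolate the list expression above:

```python
def expand_to_tokens(pred_ids, orig_positions, n_tokens, neg_label_id=0):
    """Keep predictions at the original (first-subtoken) positions and pad the
    remainder with the negative label, as write_predictions does per example."""
    kept = [pred_ids[pos] for pos in orig_positions]
    return kept + [neg_label_id] * (n_tokens - len(kept))

# A 5-token sentence whose tokens survive at subtoken positions [0, 1, 3]:
print(expand_to_tokens([7, 7, 2, 2, 5, 5], [0, 1, 3], 5))  # -> [7, 7, 2, 0, 0]
```

The companion `*_scores` lists are padded the same way, with a fixed 0.999 placeholder confidence for the padded positions.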
"<import token>\n<code token>\n<assignment token>\n\n\ndef compute_all_metrics(sent_type_labels, sent_type_preds,\n tags_sequence_labels, tags_sequence_preds, relations_sequence_labels,\n relations_sequence_preds, label2id, loss_info=None, logger=None):\n eval_tags_sequence_labels = [label2id['tags_sequence'][lab] for lab in\n EVAL_TAGS]\n eval_relations_sequence_labels = [label2id['relations_sequence'][lab] for\n lab in EVAL_RELATIONS]\n task_1_report = classification_report(sent_type_labels, sent_type_preds,\n labels=[0, 1], output_dict=True)\n task_2_report = classification_report(tags_sequence_labels,\n tags_sequence_preds, labels=eval_tags_sequence_labels, output_dict=True\n )\n task_3_report = classification_report(relations_sequence_labels,\n relations_sequence_preds, labels=eval_relations_sequence_labels,\n output_dict=True)\n result = {}\n for x in ['0', '1', 'weighted avg', 'macro avg']:\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f\"sent_type_{x.replace(' ', '-')}_{metrics}\"] = round(\n task_1_report[x][metrics], 6)\n id2label = {val: key for key, val in label2id['tags_sequence'].items()}\n id2label['weighted avg'] = 'weighted-avg'\n id2label['macro avg'] = 'macro-avg'\n for x in (eval_tags_sequence_labels + ['weighted avg', 'macro avg']):\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f'tags_sequence_{id2label[x]}_{metrics}'] = round(\n task_2_report[str(x)][metrics], 6)\n id2label = {val: key for key, val in label2id['relations_sequence'].items()\n }\n id2label['weighted avg'] = 'weighted-avg'\n id2label['macro avg'] = 'macro-avg'\n for x in (eval_relations_sequence_labels + ['weighted avg', 'macro avg']):\n for metrics in ['precision', 'recall', 'f1-score', 'support']:\n result[f'relations_sequence_{id2label[x]}_{metrics}'] = round(\n task_3_report[str(x)][metrics], 6)\n if logger is not None:\n logger.info('=====================================')\n for key in sorted(result.keys()):\n logger.info(' %s = %s', key, str(result[key]))\n if loss_info is not None:\n for key in sorted(loss_info.keys()):\n logger.info(' %s = %s', key, str(loss_info[key]))\n return result\n\n\ndef evaluate(model, device, eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids, eval_relations_sequence_labels_ids,\n label2id, compute_metrics=True, verbose=False, cur_train_mean_loss=None,\n logger=None, skip_every_n_examples=1):\n model.eval()\n num_sent_type_labels = model.num_sent_type_labels\n num_tags_sequence_labels = model.num_tags_sequence_labels\n num_relations_sequence_labels = model.num_relations_sequence_labels\n sent_type_clf_weight = model.sent_type_clf_weight\n tags_sequence_clf_weight = model.tags_sequence_clf_weight\n relations_sequence_clf_weight = model.relations_sequence_clf_weight\n eval_loss = defaultdict(float)\n nb_eval_steps = 0\n preds = defaultdict(list)\n for batch_id, batch in enumerate(tqdm(eval_dataloader, total=len(\n eval_dataloader), desc='validation ... 
')):\n if skip_every_n_examples != 1 and (batch_id + 1\n ) % skip_every_n_examples != 1:\n continue\n batch = tuple([elem.to(device) for elem in batch])\n (input_ids, input_mask, segment_ids, sent_type_labels_ids,\n tags_sequence_labels_ids, relations_sequence_labels_ids,\n token_valid_pos_ids) = batch\n with torch.no_grad():\n outputs, loss = model(input_ids=input_ids, token_type_ids=\n segment_ids, attention_mask=input_mask, sent_type_labels=\n sent_type_labels_ids, tags_sequence_labels=\n tags_sequence_labels_ids, relations_sequence_labels=\n relations_sequence_labels_ids, token_valid_pos_ids=\n token_valid_pos_ids, device=device)\n (sent_type_logits, tags_sequence_logits, relations_sequence_logits\n ) = outputs[:3]\n if compute_metrics:\n eval_loss['sent_type_loss'] += loss['sent_type_loss'].mean().item()\n eval_loss['tags_sequence_loss'] += loss['tags_sequence_loss'].mean(\n ).item()\n eval_loss['relations_sequence_loss'] += loss[\n 'relations_sequence_loss'].mean().item()\n eval_loss['weighted_loss'] += loss['weighted_loss'].mean().item()\n nb_eval_steps += 1\n preds['sent_type'].append(sent_type_logits.detach().cpu().numpy())\n preds['tags_sequence'].append(tags_sequence_logits.detach().cpu().\n numpy())\n preds['relations_sequence'].append(relations_sequence_logits.detach\n ().cpu().numpy())\n preds['sent_type'] = np.concatenate(preds['sent_type'], axis=0)\n preds['tags_sequence'] = np.concatenate(preds['tags_sequence'], axis=0)\n preds['relations_sequence'] = np.concatenate(preds['relations_sequence'\n ], axis=0)\n scores = {}\n for key in preds:\n scores[key] = softmax(preds[key], axis=-1).max(axis=-1)\n preds[key] = preds[key].argmax(axis=-1)\n if compute_metrics:\n for key in eval_loss:\n eval_loss[key] = eval_loss[key] / nb_eval_steps\n if cur_train_mean_loss is not None:\n eval_loss.update(cur_train_mean_loss)\n result = compute_all_metrics(eval_sent_type_labels_ids.numpy(),\n preds['sent_type'], np.array([x for y in\n eval_tags_sequence_labels_ids.numpy() for x in y]), np.array([x for\n y in preds['tags_sequence'] for x in y]), np.array([x for y in\n eval_relations_sequence_labels_ids.numpy() for x in y]), np.\n array([x for y in preds['relations_sequence'] for x in y]),\n label2id, loss_info=eval_loss, logger=logger)\n else:\n result = {}\n for key in eval_loss:\n result[key] = eval_loss[key]\n model.train()\n return preds, result, scores\n\n\n<function token>\n\n\ndef save_model(args, model, tokenizer, output_model_file):\n start = time.time()\n model_to_save = model.module if hasattr(model, 'module') else model\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n print(\n f'model saved in {time.time() - start} seconds to {output_model_file}')\n\n\ndef write_predictions(args, examples, features, preds, scores, dest_file,\n label2id, id2label, metrics=None):\n aggregated_results = {}\n orig_positions_map = [ex.orig_positions_map for ex in features]\n neg_label_mapper = {'tags_sequence': 'O', 'relations_sequence': '0'}\n for task in ['tags_sequence', 'relations_sequence']:\n aggregated_results[task] = [(list(pred[orig_positions]) + [label2id\n [task][neg_label_mapper[task]]] * (len(ex.tokens) - len(\n orig_positions))) for pred, orig_positions, ex in zip(preds[\n task], orig_positions_map, examples)]\n aggregated_results[f'{task}_scores'] = [(list(score[orig_positions]\n ) + [0.999] * (len(ex.tokens) - 
len(orig_positions))) for score,\n orig_positions, ex in zip(scores[task], orig_positions_map,\n examples)]\n prediction_results = {'idx': [ex.guid for ex in examples], 'tokens': [\n ' '.join(ex.tokens) for ex in examples], 'sent_type_label': [ex.\n sent_type for ex in examples], 'sent_type_pred': [id2label[\n 'sent_type'][x] for x in preds['sent_type']], 'sent_type_scores': [\n str(score) for score in scores['sent_type']], 'sent_start': [ex.\n sent_start for ex in examples], 'sent_end': [ex.sent_end for ex in\n examples], 'tags_sequence_labels': [' '.join(ex.tags_sequence) for\n ex in examples], 'tags_sequence_pred': [' '.join([(id2label[\n 'tags_sequence'][x] if x != 0 else 'O') for x in sent]) for sent in\n aggregated_results['tags_sequence']], 'tags_sequence_scores': [' '.\n join([str(score) for score in sent]) for sent in aggregated_results\n ['tags_sequence_scores']], 'tags_ids': [' '.join(ex.tags_ids) for\n ex in examples], 'relations_sequence_labels': [' '.join(ex.\n relations_sequence) for ex in examples], 'relations_sequence_pred':\n [' '.join([(id2label['relations_sequence'][x] if x != 0 else '0') for\n x in sent]) for sent in aggregated_results['relations_sequence']],\n 'relations_sequence_scores': [' '.join([str(score) for score in\n sent]) for sent in aggregated_results['relations_sequence_scores']],\n 'subj_start': [ex.subj_start for ex in examples], 'subj_end': [ex.\n subj_end for ex in examples], 'infile_offsets': [' '.join([str(\n offset) for offset in ex.infile_offsets]) for ex in examples],\n 'start_char': [' '.join(ex.start_char) for ex in examples],\n 'end_char': [' '.join(ex.end_char) for ex in examples], 'source': [\n ex.source for ex in examples]}\n prediction_results = pd.DataFrame(prediction_results)\n prediction_results.to_csv(os.path.join(args.output_dir,\n f'{dest_file}.tsv'), sep='\\t', index=False)\n if metrics is not None:\n with open(os.path.join(args.output_dir,\n f'{dest_file}_eval_results.txt'), 'w') as f:\n for key in sorted(metrics.keys()):\n f.write('%s = %s\\n' % (key, str(metrics[key])))\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef evaluate(model, device, eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids, eval_relations_sequence_labels_ids,\n label2id, compute_metrics=True, verbose=False, cur_train_mean_loss=None,\n logger=None, skip_every_n_examples=1):\n model.eval()\n num_sent_type_labels = model.num_sent_type_labels\n num_tags_sequence_labels = model.num_tags_sequence_labels\n num_relations_sequence_labels = model.num_relations_sequence_labels\n sent_type_clf_weight = model.sent_type_clf_weight\n tags_sequence_clf_weight = model.tags_sequence_clf_weight\n relations_sequence_clf_weight = model.relations_sequence_clf_weight\n eval_loss = defaultdict(float)\n nb_eval_steps = 0\n preds = defaultdict(list)\n for batch_id, batch in enumerate(tqdm(eval_dataloader, total=len(\n eval_dataloader), desc='validation ... ')):\n if skip_every_n_examples != 1 and (batch_id + 1\n ) % skip_every_n_examples != 1:\n continue\n batch = tuple([elem.to(device) for elem in batch])\n (input_ids, input_mask, segment_ids, sent_type_labels_ids,\n tags_sequence_labels_ids, relations_sequence_labels_ids,\n token_valid_pos_ids) = batch\n with torch.no_grad():\n outputs, loss = model(input_ids=input_ids, token_type_ids=\n segment_ids, attention_mask=input_mask, sent_type_labels=\n sent_type_labels_ids, tags_sequence_labels=\n tags_sequence_labels_ids, relations_sequence_labels=\n relations_sequence_labels_ids, token_valid_pos_ids=\n token_valid_pos_ids, device=device)\n (sent_type_logits, tags_sequence_logits, relations_sequence_logits\n ) = outputs[:3]\n if compute_metrics:\n eval_loss['sent_type_loss'] += loss['sent_type_loss'].mean().item()\n eval_loss['tags_sequence_loss'] += loss['tags_sequence_loss'].mean(\n ).item()\n eval_loss['relations_sequence_loss'] += loss[\n 'relations_sequence_loss'].mean().item()\n eval_loss['weighted_loss'] += loss['weighted_loss'].mean().item()\n nb_eval_steps += 1\n preds['sent_type'].append(sent_type_logits.detach().cpu().numpy())\n preds['tags_sequence'].append(tags_sequence_logits.detach().cpu().\n numpy())\n preds['relations_sequence'].append(relations_sequence_logits.detach\n ().cpu().numpy())\n preds['sent_type'] = np.concatenate(preds['sent_type'], axis=0)\n preds['tags_sequence'] = np.concatenate(preds['tags_sequence'], axis=0)\n preds['relations_sequence'] = np.concatenate(preds['relations_sequence'\n ], axis=0)\n scores = {}\n for key in preds:\n scores[key] = softmax(preds[key], axis=-1).max(axis=-1)\n preds[key] = preds[key].argmax(axis=-1)\n if compute_metrics:\n for key in eval_loss:\n eval_loss[key] = eval_loss[key] / nb_eval_steps\n if cur_train_mean_loss is not None:\n eval_loss.update(cur_train_mean_loss)\n result = compute_all_metrics(eval_sent_type_labels_ids.numpy(),\n preds['sent_type'], np.array([x for y in\n eval_tags_sequence_labels_ids.numpy() for x in y]), np.array([x for\n y in preds['tags_sequence'] for x in y]), np.array([x for y in\n eval_relations_sequence_labels_ids.numpy() for x in y]), np.\n array([x for y in preds['relations_sequence'] for x in y]),\n label2id, loss_info=eval_loss, logger=logger)\n else:\n result = {}\n for key in eval_loss:\n result[key] = eval_loss[key]\n model.train()\n return preds, result, scores\n\n\n<function token>\n\n\ndef save_model(args, model, tokenizer, output_model_file):\n start = time.time()\n model_to_save = model.module if hasattr(model, 'module') else model\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n 
torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n print(\n f'model saved in {time.time() - start} seconds to {output_model_file}')\n\n\ndef write_predictions(args, examples, features, preds, scores, dest_file,\n label2id, id2label, metrics=None):\n aggregated_results = {}\n orig_positions_map = [ex.orig_positions_map for ex in features]\n neg_label_mapper = {'tags_sequence': 'O', 'relations_sequence': '0'}\n for task in ['tags_sequence', 'relations_sequence']:\n aggregated_results[task] = [(list(pred[orig_positions]) + [label2id\n [task][neg_label_mapper[task]]] * (len(ex.tokens) - len(\n orig_positions))) for pred, orig_positions, ex in zip(preds[\n task], orig_positions_map, examples)]\n aggregated_results[f'{task}_scores'] = [(list(score[orig_positions]\n ) + [0.999] * (len(ex.tokens) - len(orig_positions))) for score,\n orig_positions, ex in zip(scores[task], orig_positions_map,\n examples)]\n prediction_results = {'idx': [ex.guid for ex in examples], 'tokens': [\n ' '.join(ex.tokens) for ex in examples], 'sent_type_label': [ex.\n sent_type for ex in examples], 'sent_type_pred': [id2label[\n 'sent_type'][x] for x in preds['sent_type']], 'sent_type_scores': [\n str(score) for score in scores['sent_type']], 'sent_start': [ex.\n sent_start for ex in examples], 'sent_end': [ex.sent_end for ex in\n examples], 'tags_sequence_labels': [' '.join(ex.tags_sequence) for\n ex in examples], 'tags_sequence_pred': [' '.join([(id2label[\n 'tags_sequence'][x] if x != 0 else 'O') for x in sent]) for sent in\n aggregated_results['tags_sequence']], 'tags_sequence_scores': [' '.\n join([str(score) for score in sent]) for sent in aggregated_results\n ['tags_sequence_scores']], 'tags_ids': [' '.join(ex.tags_ids) for\n ex in examples], 'relations_sequence_labels': [' '.join(ex.\n relations_sequence) for ex in examples], 'relations_sequence_pred':\n [' '.join([(id2label['relations_sequence'][x] if x != 0 else '0') for\n x in sent]) for sent in aggregated_results['relations_sequence']],\n 'relations_sequence_scores': [' '.join([str(score) for score in\n sent]) for sent in aggregated_results['relations_sequence_scores']],\n 'subj_start': [ex.subj_start for ex in examples], 'subj_end': [ex.\n subj_end for ex in examples], 'infile_offsets': [' '.join([str(\n offset) for offset in ex.infile_offsets]) for ex in examples],\n 'start_char': [' '.join(ex.start_char) for ex in examples],\n 'end_char': [' '.join(ex.end_char) for ex in examples], 'source': [\n ex.source for ex in examples]}\n prediction_results = pd.DataFrame(prediction_results)\n prediction_results.to_csv(os.path.join(args.output_dir,\n f'{dest_file}.tsv'), sep='\\t', index=False)\n if metrics is not None:\n with open(os.path.join(args.output_dir,\n f'{dest_file}_eval_results.txt'), 'w') as f:\n for key in sorted(metrics.keys()):\n f.write('%s = %s\\n' % (key, str(metrics[key])))\n\n\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef evaluate(model, device, eval_dataloader, eval_sent_type_labels_ids,\n eval_tags_sequence_labels_ids, eval_relations_sequence_labels_ids,\n label2id, compute_metrics=True, verbose=False, cur_train_mean_loss=None,\n logger=None, skip_every_n_examples=1):\n model.eval()\n num_sent_type_labels = model.num_sent_type_labels\n num_tags_sequence_labels = model.num_tags_sequence_labels\n num_relations_sequence_labels = model.num_relations_sequence_labels\n sent_type_clf_weight = model.sent_type_clf_weight\n tags_sequence_clf_weight = model.tags_sequence_clf_weight\n relations_sequence_clf_weight = model.relations_sequence_clf_weight\n eval_loss = defaultdict(float)\n nb_eval_steps = 0\n preds = defaultdict(list)\n for batch_id, batch in enumerate(tqdm(eval_dataloader, total=len(\n eval_dataloader), desc='validation ... ')):\n if skip_every_n_examples != 1 and (batch_id + 1\n ) % skip_every_n_examples != 1:\n continue\n batch = tuple([elem.to(device) for elem in batch])\n (input_ids, input_mask, segment_ids, sent_type_labels_ids,\n tags_sequence_labels_ids, relations_sequence_labels_ids,\n token_valid_pos_ids) = batch\n with torch.no_grad():\n outputs, loss = model(input_ids=input_ids, token_type_ids=\n segment_ids, attention_mask=input_mask, sent_type_labels=\n sent_type_labels_ids, tags_sequence_labels=\n tags_sequence_labels_ids, relations_sequence_labels=\n relations_sequence_labels_ids, token_valid_pos_ids=\n token_valid_pos_ids, device=device)\n (sent_type_logits, tags_sequence_logits, relations_sequence_logits\n ) = outputs[:3]\n if compute_metrics:\n eval_loss['sent_type_loss'] += loss['sent_type_loss'].mean().item()\n eval_loss['tags_sequence_loss'] += loss['tags_sequence_loss'].mean(\n ).item()\n eval_loss['relations_sequence_loss'] += loss[\n 'relations_sequence_loss'].mean().item()\n eval_loss['weighted_loss'] += loss['weighted_loss'].mean().item()\n nb_eval_steps += 1\n preds['sent_type'].append(sent_type_logits.detach().cpu().numpy())\n preds['tags_sequence'].append(tags_sequence_logits.detach().cpu().\n numpy())\n preds['relations_sequence'].append(relations_sequence_logits.detach\n ().cpu().numpy())\n preds['sent_type'] = np.concatenate(preds['sent_type'], axis=0)\n preds['tags_sequence'] = np.concatenate(preds['tags_sequence'], axis=0)\n preds['relations_sequence'] = np.concatenate(preds['relations_sequence'\n ], axis=0)\n scores = {}\n for key in preds:\n scores[key] = softmax(preds[key], axis=-1).max(axis=-1)\n preds[key] = preds[key].argmax(axis=-1)\n if compute_metrics:\n for key in eval_loss:\n eval_loss[key] = eval_loss[key] / nb_eval_steps\n if cur_train_mean_loss is not None:\n eval_loss.update(cur_train_mean_loss)\n result = compute_all_metrics(eval_sent_type_labels_ids.numpy(),\n preds['sent_type'], np.array([x for y in\n eval_tags_sequence_labels_ids.numpy() for x in y]), np.array([x for\n y in preds['tags_sequence'] for x in y]), np.array([x for y in\n eval_relations_sequence_labels_ids.numpy() for x in y]), np.\n array([x for y in preds['relations_sequence'] for x in y]),\n label2id, loss_info=eval_loss, logger=logger)\n else:\n result = {}\n for key in eval_loss:\n result[key] = eval_loss[key]\n model.train()\n return preds, result, scores\n\n\n<function token>\n\n\ndef save_model(args, model, tokenizer, output_model_file):\n start = time.time()\n model_to_save = model.module if hasattr(model, 'module') else model\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n 
torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n print(\n f'model saved in {time.time() - start} seconds to {output_model_file}')\n\n\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef save_model(args, model, tokenizer, output_model_file):\n start = time.time()\n model_to_save = model.module if hasattr(model, 'module') else model\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n print(\n f'model saved in {time.time() - start} seconds to {output_model_file}')\n\n\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
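The evaluate routine in the entry above turns stacked logits into predictions and per-class confidences with a softmax/argmax pass. A minimal standalone sketch of that step, using invented logits rather than the project's tensors:

import numpy as np
from scipy.special import softmax

logits = np.array([[2.0, 0.5, -1.0],
                   [0.1, 0.2, 3.0]])            # synthetic 2-example, 3-class logits
scores = softmax(logits, axis=-1).max(axis=-1)  # confidence of the top class
preds = logits.argmax(axis=-1)                  # predicted class ids
print(preds, scores)  # [0 2] plus the matching max probabilities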
98,489 |
02c5251fd9dbd13446f1d7f946a896cb54f5633d
|
import re
def get_file_name(url):
    # everything after the first 18 characters of the "/forum/..." match
    return re.findall(r"/forum/.*", url)[0][18:]
|
[
"import re\n\ndef get_file_name(url):\n return re.findall(r\"/forum/.*\", url)[0][18::]",
"import re\n\n\ndef get_file_name(url):\n return re.findall('/forum/.*', url)[0][18:]\n",
"<import token>\n\n\ndef get_file_name(url):\n return re.findall('/forum/.*', url)[0][18:]\n",
"<import token>\n<function token>\n"
] | false |
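A hedged usage sketch of the entry above; the URL is invented and assumes an 18-character "/forum/.../" prefix before the file name:

import re

url = "https://example.com/forum/files/2021/report.pdf"
name = re.findall(r"/forum/.*", url)[0][18:]
print(name)  # -> "report.pdf"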
98,490 |
c9b51c33297fdef45eda5629e97a58272adced0c
|
import models
from django.contrib import admin
import multilingual
class ChoiceInline(admin.StackedInline):
model = models.Choice
extra = 4
class SingleChoicePollAdmin(multilingual.ModelAdmin):
inlines = [ChoiceInline]
prepopulated_fields = {"slug": ("question_en",)}
class PercentagePollAdmin(multilingual.ModelAdmin):
prepopulated_fields = {"slug": ("question_en",)}
admin.site.register(models.SingleChoicePoll, SingleChoicePollAdmin)
admin.site.register(models.PercentagePoll, PercentagePollAdmin)
admin.site.register(models.Choice, multilingual.ModelAdmin)
admin.site.register(models.Percentage, admin.ModelAdmin)
admin.site.register(models.LoggedVote, admin.ModelAdmin)
|
[
"import models\r\nfrom django.contrib import admin\r\nimport multilingual\r\n\r\nclass ChoiceInline(admin.StackedInline):\r\n model = models.Choice\r\n extra = 4\r\nclass SingleChoicePollAdmin(multilingual.ModelAdmin):\r\n inlines = [ChoiceInline]\r\n prepopulated_fields = {\"slug\": (\"question_en\",)}\r\n\r\nclass PercentagePollAdmin(multilingual.ModelAdmin):\r\n prepopulated_fields = {\"slug\": (\"question_en\",)}\r\n\r\nadmin.site.register(models.SingleChoicePoll, SingleChoicePollAdmin)\r\nadmin.site.register(models.PercentagePoll, PercentagePollAdmin)\r\nadmin.site.register(models.Choice, multilingual.ModelAdmin)\r\nadmin.site.register(models.Percentage, admin.ModelAdmin)\r\nadmin.site.register(models.LoggedVote, admin.ModelAdmin)",
"import models\nfrom django.contrib import admin\nimport multilingual\n\n\nclass ChoiceInline(admin.StackedInline):\n model = models.Choice\n extra = 4\n\n\nclass SingleChoicePollAdmin(multilingual.ModelAdmin):\n inlines = [ChoiceInline]\n prepopulated_fields = {'slug': ('question_en',)}\n\n\nclass PercentagePollAdmin(multilingual.ModelAdmin):\n prepopulated_fields = {'slug': ('question_en',)}\n\n\nadmin.site.register(models.SingleChoicePoll, SingleChoicePollAdmin)\nadmin.site.register(models.PercentagePoll, PercentagePollAdmin)\nadmin.site.register(models.Choice, multilingual.ModelAdmin)\nadmin.site.register(models.Percentage, admin.ModelAdmin)\nadmin.site.register(models.LoggedVote, admin.ModelAdmin)\n",
"<import token>\n\n\nclass ChoiceInline(admin.StackedInline):\n model = models.Choice\n extra = 4\n\n\nclass SingleChoicePollAdmin(multilingual.ModelAdmin):\n inlines = [ChoiceInline]\n prepopulated_fields = {'slug': ('question_en',)}\n\n\nclass PercentagePollAdmin(multilingual.ModelAdmin):\n prepopulated_fields = {'slug': ('question_en',)}\n\n\nadmin.site.register(models.SingleChoicePoll, SingleChoicePollAdmin)\nadmin.site.register(models.PercentagePoll, PercentagePollAdmin)\nadmin.site.register(models.Choice, multilingual.ModelAdmin)\nadmin.site.register(models.Percentage, admin.ModelAdmin)\nadmin.site.register(models.LoggedVote, admin.ModelAdmin)\n",
"<import token>\n\n\nclass ChoiceInline(admin.StackedInline):\n model = models.Choice\n extra = 4\n\n\nclass SingleChoicePollAdmin(multilingual.ModelAdmin):\n inlines = [ChoiceInline]\n prepopulated_fields = {'slug': ('question_en',)}\n\n\nclass PercentagePollAdmin(multilingual.ModelAdmin):\n prepopulated_fields = {'slug': ('question_en',)}\n\n\n<code token>\n",
"<import token>\n\n\nclass ChoiceInline(admin.StackedInline):\n <assignment token>\n <assignment token>\n\n\nclass SingleChoicePollAdmin(multilingual.ModelAdmin):\n inlines = [ChoiceInline]\n prepopulated_fields = {'slug': ('question_en',)}\n\n\nclass PercentagePollAdmin(multilingual.ModelAdmin):\n prepopulated_fields = {'slug': ('question_en',)}\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass SingleChoicePollAdmin(multilingual.ModelAdmin):\n inlines = [ChoiceInline]\n prepopulated_fields = {'slug': ('question_en',)}\n\n\nclass PercentagePollAdmin(multilingual.ModelAdmin):\n prepopulated_fields = {'slug': ('question_en',)}\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass SingleChoicePollAdmin(multilingual.ModelAdmin):\n <assignment token>\n <assignment token>\n\n\nclass PercentagePollAdmin(multilingual.ModelAdmin):\n prepopulated_fields = {'slug': ('question_en',)}\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass PercentagePollAdmin(multilingual.ModelAdmin):\n prepopulated_fields = {'slug': ('question_en',)}\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass PercentagePollAdmin(multilingual.ModelAdmin):\n <assignment token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<code token>\n"
] | false |
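The entry above wires Choice rows inline into the poll form and auto-fills the slug from question_en via multilingual.ModelAdmin. A hedged sketch of the same inline-plus-prepopulated-slug pattern using Django's stock ModelAdmin and the @admin.register decorator (the multilingual wrapper is dropped here; model names are assumed from the entry):

from django.contrib import admin
import models  # same models module as in the entry above

class ChoiceInline(admin.StackedInline):
    model = models.Choice
    extra = 4  # four empty choice forms per poll

@admin.register(models.SingleChoicePoll)
class SingleChoicePollAdmin(admin.ModelAdmin):
    inlines = [ChoiceInline]
    # slug is filled from question_en as the admin user types
    prepopulated_fields = {"slug": ("question_en",)}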
98,491 |
d26671c18d293760fc203b131f2c8620ba686228
|
from base.application.components import Base
from base.application.components import api
from base.application.components import params
from base.application.components import authenticated
import base.common.orm
from base.common.sequencer import sequencer
import datetime
import decimal
import json
# @authenticated() # if every http method has to be authenticated
@api(
URI='/testimonials'
)
class Testimonial(Base):
def get(self):
Testimonial, _session = base.common.orm.get_orm_model('testimonials')
_all = _session.query(Testimonial).all()
res = []
for test in _all:
res.append({
"name": test.name,
"image": test.image,
"description": test.description
})
return self.ok({'testimonials': res})
@params(
{'name': 'testimonial_data', 'type': json, 'doc': 'data of testimonial to add'}
)
def post(self, testimonial_data):
Testimonial, _session = base.common.orm.get_orm_model('testimonials')
print('data ', testimonial_data)
sid = sequencer().new('t')
testimonial = Testimonial(sid,
testimonial_data['name'],
testimonial_data['image'],
testimonial_data['description'])
try:
_session.add(testimonial)
_session.commit()
return self.ok({'added': sid})
except Exception as e:
return self.error('{}'.format(e))
|
[
"from base.application.components import Base\nfrom base.application.components import api\nfrom base.application.components import params\nfrom base.application.components import authenticated\nimport base.common.orm\nfrom base.common.sequencer import sequencer\nimport datetime\nimport decimal\nimport json\n\n\n# @authenticated() # if every http method has to be authenticated\n@api(\n URI='/testimonials'\n)\nclass Testimonial(Base):\n\n def get(self):\n\n Testimonial, _session = base.common.orm.get_orm_model('testimonials')\n\n _all = _session.query(Testimonial).all()\n\n res = []\n\n for test in _all:\n\n res.append({\n \"name\": test.name,\n \"image\": test.image,\n \"description\": test.description\n })\n\n return self.ok({'testimonials': res})\n\n\n @params(\n {'name': 'testimonial_data', 'type': json, 'doc': 'data of testimonial to add'}\n )\n def post(self, testimonial_data):\n Testimonial, _session = base.common.orm.get_orm_model('testimonials')\n\n print('data ', testimonial_data)\n\n sid = sequencer().new('t')\n\n testimonial = Testimonial(sid,\n testimonial_data['name'],\n testimonial_data['image'],\n testimonial_data['description'])\n\n try:\n _session.add(testimonial)\n _session.commit()\n return self.ok({'added': sid})\n\n except Exception as e:\n return self.error('{}'.format(e))\n\n",
"from base.application.components import Base\nfrom base.application.components import api\nfrom base.application.components import params\nfrom base.application.components import authenticated\nimport base.common.orm\nfrom base.common.sequencer import sequencer\nimport datetime\nimport decimal\nimport json\n\n\n@api(URI='/testimonials')\nclass Testimonial(Base):\n\n def get(self):\n Testimonial, _session = base.common.orm.get_orm_model('testimonials')\n _all = _session.query(Testimonial).all()\n res = []\n for test in _all:\n res.append({'name': test.name, 'image': test.image,\n 'description': test.description})\n return self.ok({'testimonials': res})\n\n @params({'name': 'testimonial_data', 'type': json, 'doc':\n 'data of testimonial to add'})\n def post(self, testimonial_data):\n Testimonial, _session = base.common.orm.get_orm_model('testimonials')\n print('data ', testimonial_data)\n sid = sequencer().new('t')\n testimonial = Testimonial(sid, testimonial_data['name'],\n testimonial_data['image'], testimonial_data['description'])\n try:\n _session.add(testimonial)\n _session.commit()\n return self.ok({'added': sid})\n except Exception as e:\n return self.error('{}'.format(e))\n",
"<import token>\n\n\n@api(URI='/testimonials')\nclass Testimonial(Base):\n\n def get(self):\n Testimonial, _session = base.common.orm.get_orm_model('testimonials')\n _all = _session.query(Testimonial).all()\n res = []\n for test in _all:\n res.append({'name': test.name, 'image': test.image,\n 'description': test.description})\n return self.ok({'testimonials': res})\n\n @params({'name': 'testimonial_data', 'type': json, 'doc':\n 'data of testimonial to add'})\n def post(self, testimonial_data):\n Testimonial, _session = base.common.orm.get_orm_model('testimonials')\n print('data ', testimonial_data)\n sid = sequencer().new('t')\n testimonial = Testimonial(sid, testimonial_data['name'],\n testimonial_data['image'], testimonial_data['description'])\n try:\n _session.add(testimonial)\n _session.commit()\n return self.ok({'added': sid})\n except Exception as e:\n return self.error('{}'.format(e))\n",
"<import token>\n\n\n@api(URI='/testimonials')\nclass Testimonial(Base):\n\n def get(self):\n Testimonial, _session = base.common.orm.get_orm_model('testimonials')\n _all = _session.query(Testimonial).all()\n res = []\n for test in _all:\n res.append({'name': test.name, 'image': test.image,\n 'description': test.description})\n return self.ok({'testimonials': res})\n <function token>\n",
"<import token>\n\n\n@api(URI='/testimonials')\nclass Testimonial(Base):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
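The POST handler above expects a JSON testimonial_data parameter and answers with the new sequencer id. A hedged client-side sketch (host, port, and payload are invented; the exact body shape depends on how base's @params decoder reads JSON):

import requests

payload = {"testimonial_data": {"name": "Jane Doe",
                                "image": "jane.png",
                                "description": "Great service!"}}
resp = requests.post("http://localhost:8888/testimonials", json=payload)
print(resp.status_code, resp.json())  # e.g. {"added": "t..."} on success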
98,492 |
0da5ee0e879739bba00d206f19808aeb09287e92
|
/home/wxy/anaconda3/lib/python3.6/heapq.py
|
[
"/home/wxy/anaconda3/lib/python3.6/heapq.py"
] | true |
98,493 |
00e5628f968d2b2aa362819bfea6319ac50d8631
|
import io
import itertools
import math
import os
import random
from io import BytesIO
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image, ImageDraw, ImageFont
#from .color_util import ColorUtil
from matplotlib import patches
from .misc_util import BoundingBox
from .file_util import FileUtil
from .color_util import ColorUtil
from lxml import objectify
class VIUtil:
    @staticmethod
    def fig2buffer(fig):
        fig.canvas.draw()
        w, h = fig.canvas.get_width_height()
        # np.frombuffer replaces the deprecated np.fromstring; the ARGB buffer
        # is row-major, so the array shape is (height, width, 4), not (w, h, 4)
        buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8).copy()
        buf.shape = (h, w, 4)
        buf = np.roll(buf, 3, axis=2)  # ARGB -> RGBA
        return buf
    @classmethod
    def fig2img(cls, fig):
        buf = cls.fig2buffer(fig)
        h, w, d = buf.shape
        img = Image.frombytes("RGBA", (w, h), buf.tobytes())
        return np.asarray(img)
@staticmethod
def fig2numpy(fig,dpi=180):
buf=BytesIO()
fig.savefig(buf,format="png",dpi=dpi)
buf.seek(0)
img_arr=np.frombuffer(buf.getvalue(),dtype=np.uint8)
buf.close()
img=cv2.imdecode(img_arr,1)
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
return img
@staticmethod
def _imshow(img: np.ndarray):
img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
cv2.namedWindow("src",cv2.WINDOW_NORMAL)
cv2.resizeWindow("src",(800,600))
cv2.imshow("src",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
@staticmethod
def draw_mask(image,mask,color=(255,0,0),alpha=0.4):
assert image.dtype == np.uint8, '`image` not of type np.uint8'
assert mask.dtype == np.uint8, '`mask` not of type np.uint8'
if np.any(np.logical_and(mask != 1,mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s'%(image.shape[:2],mask.shape))
pil_image=Image.fromarray(image)
solid_color=np.expand_dims(
np.ones_like(mask),axis=2)*np.reshape(list(color),[1,1,3])
pil_solid_color=Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask=Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
pil_image=Image.composite(pil_solid_color,pil_image,pil_mask)
np.copyto(image,np.array(pil_image.convert('RGB')))
@classmethod
def imshow(cls,img,boxes, alpha=0.3):
labels=np.unique([box.label for box in boxes])
colors_dict=ColorUtil.rainbow(len(labels))
        R = np.asarray(colors_dict["r"], dtype=float) / 255.0
        G = np.asarray(colors_dict["g"], dtype=float) / 255.0
        B = np.asarray(colors_dict["b"], dtype=float) / 255.0
A = [1.0] * len(labels)
palette=list(zip(B.tolist(),G.tolist(),R.tolist(), A))
colors={label: palette[i] for i,label in enumerate(labels)}
fig=plt.figure(figsize=(20,10))
ax=fig.add_subplot(111, aspect='equal')
for i,box in enumerate(boxes):
c=colors[box.label]
if isinstance(box.mask, np.ndarray):
mask = box.mask
rgb_color=tuple(map(lambda x: math.floor(x*255),c[:-1]))
cls.draw_mask(img, mask, color=rgb_color, alpha=alpha)
label="{} :{:.2f}".format(box.label,box.score)
ax.add_patch(
patches.Rectangle(
(box.x1,box.y1),
box.x2-box.x1,
box.y2-box.y1,
linewidth=3,
edgecolor=c,
facecolor='none',
fill=False
))
ax.text(
x=box.x1,
y=box.y1,
s=label,
color="white",
fontsize=12,
bbox=dict(boxstyle="round",facecolor=colors[box.label],alpha=0.9))
ax.set_axis_off()
ax.imshow(img)
#img = cls.fig2numpy(fig)
#cls._imshow(img)
plt.show()
#cv2.putText(rgb, label,(box.x1 + 10,box.y1 + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, colors[box.label],2)
#fig.savefig('result_{}.png'.format(uuid.uuid4()), dpi=300, bbox_inches='tight')
return fig
@staticmethod
def get_voc_annotations(image_path: str, xml_path: str=None, mask_path=None, labels_map =None):
'''
return the annotations of the image
:param image_path: image path
:return: a list with the bounding boxes
'''
try:
image_path = Path(image_path)
file_name = image_path.stem
if xml_path is None:
xml_path = str(image_path.parent.joinpath("{}.xml".format(file_name)))
mask_numpy = None
try:
if mask_path:
with open(mask_path, 'rb') as fid:
encoded_mask_png = fid.read()
encoded_png_io = io.BytesIO(encoded_mask_png)
pil_mask = Image.open(encoded_png_io)
if pil_mask.format != 'PNG':
raise ValueError('Mask image format not PNG')
mask_numpy = np.asarray(pil_mask)
except Exception as ex:
raise Exception("error reading the image")
# load image
with open(xml_path) as f:
xml = f.read()
root = objectify.fromstring(xml)
boxes = []
for item in root.object:
xmin = item.bndbox.xmin
ymin = item.bndbox.ymin
xmax = item.bndbox.xmax
ymax = item.bndbox.ymax
label = str(item.name)
box = BoundingBox(
x1=xmin,
x2=xmax,
y1=ymin,
y2=ymax,
label=label.title()
)
                if isinstance(mask_numpy, np.ndarray) and labels_map:
                    if label in labels_map:
                        class_id = labels_map[label]
                        binary_mask = np.zeros_like(mask_numpy)
                        binary_mask[mask_numpy == class_id] = 1
                        box.mask = binary_mask
                # append once, after the optional mask has been attached
                boxes.append(box)
return boxes
except Exception as e:
print("Error reading the image {}".format(str(e)))
@classmethod
def make_grid(cls, images_folder, annotations_folder=None, masks_folder = None, n=4, rows=2, figsize = (10,10), fontsize=10, labels_map=None):
import matplotlib
matplotlib.use('WXAgg')
import matplotlib.pylab as plt
images_folder = Path(images_folder)
assert images_folder.exists(), "images folder not found"
if annotations_folder is None:
annotations_folder = images_folder
if masks_folder is None:
masks_folder = images_folder
# read files
img_files = FileUtil.get_files(images_folder, [".jpg", ".jpeg"])
xml_files = FileUtil.get_files(annotations_folder, [".xml"])
png_files = FileUtil.get_files(masks_folder, [".png"])
files = img_files + xml_files + png_files
files = sorted(files, key=lambda img: img.stem)
files = [(img_name,list(img_files)) for img_name, img_files in itertools.groupby(files, key=lambda img: img.stem)]
files = random.sample(files, k=n)
cols = math.ceil(n / rows)
if labels_map:
labels_map = {k.title():v for k,v in labels_map.items()}
# load annotations
annotations_dict = {}
for img_name, img_files in files:
if len(img_files) >= 2:
img_path = img_files[0]
xml_path = img_files[1]
mask_path = None
if len(img_files) == 3:
mask_path = img_files[2]
annotations_dict[img_path] = cls.get_voc_annotations(img_path, xml_path, mask_path, labels_map)
assert len(annotations_dict) > 0, "Not annotations found"
labels = set([box.label for boxes in annotations_dict.values() for box in boxes ])
labels_colors = dict(zip(labels, ColorUtil.rainbow_rgb(len(labels))))
# show annotations
fig = plt.figure(figsize=figsize)
for i, (img_path, img_boxes) in enumerate(annotations_dict.items()):
pil_image: Image = Image.open(img_path)
image_numpy = np.asarray(pil_image).copy()
# create figure
ax = fig.add_subplot(rows, cols, i + 1)
ax.set_axis_off()
for box in img_boxes:
label = box.label
color = labels_colors[label]
ax.add_patch(
patches.Rectangle(
(box.x1, box.y1),
box.x2 - box.x1,
box.y2 - box.y1,
linewidth=3,
edgecolor=np.asarray(color) /255,
facecolor='none',
fill=False
))
if isinstance(box.mask, np.ndarray) and labels_map:
cls.draw_mask(image_numpy, box.mask,color)
# ax.text(
# x=box[0][0] + 20,
# y=box[0][1] + 20,
# s=label,
# color="white",
# fontsize=12,
# bbox=dict(boxstyle="round", facecolor=np.array(color) / 255, alpha=0.9))
ax.imshow(image_numpy)
plt.show()
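# Hedged usage sketch (not part of the original file): the folder paths and
# labels map are invented, and make_grid needs a WX-capable matplotlib backend.
# VIUtil.make_grid("dataset/images",
#                  annotations_folder="dataset/annotations",
#                  masks_folder="dataset/masks",
#                  n=4, rows=2,
#                  labels_map={"person": 1, "car": 2})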
|
[
"import io\nimport itertools\nimport math\nimport os\nimport random\nfrom io import BytesIO\nfrom pathlib import Path\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\n#from .color_util import ColorUtil\nfrom matplotlib import patches\n\nfrom .misc_util import BoundingBox\nfrom .file_util import FileUtil\nfrom .color_util import ColorUtil\nfrom lxml import objectify\n\nclass VIUtil:\n\n @staticmethod\n def fig2buffer(fig):\n fig.canvas.draw()\n w,h=fig.canvas.get_width_height()\n buf=np.fromstring(fig.canvas.tostring_argb(),dtype=np.uint8)\n buf.shape=(w,h,4)\n buf=np.roll(buf,3,axis=2)\n return buf\n\n @classmethod\n def fig2img(cls,fig):\n buf=cls.fig2buffer(fig)\n w,h,d=buf.shape\n img=Image.frombytes(\"RGBA\",(w,h),buf.tostring())\n return np.asarray(img)\n\n @staticmethod\n def fig2numpy(fig,dpi=180):\n buf=BytesIO()\n fig.savefig(buf,format=\"png\",dpi=dpi)\n buf.seek(0)\n img_arr=np.frombuffer(buf.getvalue(),dtype=np.uint8)\n buf.close()\n img=cv2.imdecode(img_arr,1)\n img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n return img\n\n @staticmethod\n def _imshow(img: np.ndarray):\n img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n cv2.namedWindow(\"src\",cv2.WINDOW_NORMAL)\n cv2.resizeWindow(\"src\",(800,600))\n cv2.imshow(\"src\",img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n @staticmethod\n def draw_mask(image,mask,color=(255,0,0),alpha=0.4):\n assert image.dtype == np.uint8, '`image` not of type np.uint8'\n assert mask.dtype == np.uint8, '`mask` not of type np.uint8'\n if np.any(np.logical_and(mask != 1,mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n if image.shape[:2] != mask.shape:\n raise ValueError('The image has spatial dimensions %s but the mask has '\n 'dimensions %s'%(image.shape[:2],mask.shape))\n pil_image=Image.fromarray(image)\n solid_color=np.expand_dims(\n np.ones_like(mask),axis=2)*np.reshape(list(color),[1,1,3])\n pil_solid_color=Image.fromarray(np.uint8(solid_color)).convert('RGBA')\n pil_mask=Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')\n pil_image=Image.composite(pil_solid_color,pil_image,pil_mask)\n np.copyto(image,np.array(pil_image.convert('RGB')))\n\n @classmethod\n def imshow(cls,img,boxes, alpha=0.3):\n labels=np.unique([box.label for box in boxes])\n colors_dict=ColorUtil.rainbow(len(labels))\n R=np.asarray(colors_dict[\"r\"], dtype=np.float) / 255.0\n G=np.asarray(colors_dict[\"g\"], dtype=np.float) / 255.0\n B=np.asarray(colors_dict[\"b\"], dtype=np.float) / 255.0\n A = [1.0] * len(labels)\n palette=list(zip(B.tolist(),G.tolist(),R.tolist(), A))\n colors={label: palette[i] for i,label in enumerate(labels)}\n fig=plt.figure(figsize=(20,10))\n ax=fig.add_subplot(111, aspect='equal')\n for i,box in enumerate(boxes):\n c=colors[box.label]\n if isinstance(box.mask, np.ndarray):\n mask = box.mask\n rgb_color=tuple(map(lambda x: math.floor(x*255),c[:-1]))\n cls.draw_mask(img, mask, color=rgb_color, alpha=alpha)\n label=\"{} :{:.2f}\".format(box.label,box.score)\n ax.add_patch(\n patches.Rectangle(\n (box.x1,box.y1),\n box.x2-box.x1,\n box.y2-box.y1,\n linewidth=3,\n edgecolor=c,\n facecolor='none',\n fill=False\n ))\n ax.text(\n x=box.x1,\n y=box.y1,\n s=label,\n color=\"white\",\n fontsize=12,\n bbox=dict(boxstyle=\"round\",facecolor=colors[box.label],alpha=0.9))\n ax.set_axis_off()\n ax.imshow(img)\n #img = cls.fig2numpy(fig)\n #cls._imshow(img)\n plt.show()\n #cv2.putText(rgb, label,(box.x1 + 10,box.y1 + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, colors[box.label],2)\n 
#fig.savefig('result_{}.png'.format(uuid.uuid4()), dpi=300, bbox_inches='tight')\n return fig\n\n @staticmethod\n def get_voc_annotations(image_path: str, xml_path: str=None, mask_path=None, labels_map =None):\n '''\n return the annotations of the image\n :param image_path: image path\n :return: a list with the bounding boxes\n '''\n try:\n image_path = Path(image_path)\n file_name = image_path.stem\n if xml_path is None:\n xml_path = str(image_path.parent.joinpath(\"{}.xml\".format(file_name)))\n\n mask_numpy = None\n try:\n if mask_path:\n with open(mask_path, 'rb') as fid:\n encoded_mask_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_mask_png)\n pil_mask = Image.open(encoded_png_io)\n if pil_mask.format != 'PNG':\n raise ValueError('Mask image format not PNG')\n mask_numpy = np.asarray(pil_mask)\n except Exception as ex:\n raise Exception(\"error reading the image\")\n\n # load image\n with open(xml_path) as f:\n xml = f.read()\n root = objectify.fromstring(xml)\n boxes = []\n for item in root.object:\n xmin = item.bndbox.xmin\n ymin = item.bndbox.ymin\n xmax = item.bndbox.xmax\n ymax = item.bndbox.ymax\n label = str(item.name)\n box = BoundingBox(\n x1=xmin,\n x2=xmax,\n y1=ymin,\n y2=ymax,\n label=label.title()\n )\n boxes.append(box)\n if isinstance(mask_numpy, np.ndarray) and labels_map:\n if label in labels_map:\n class_id = labels_map[label]\n binary_mask = np.zeros_like(mask_numpy)\n binary_mask[mask_numpy == class_id] = 1\n box.mask = binary_mask\n boxes.append(box)\n return boxes\n except Exception as e:\n print(\"Error reading the image {}\".format(str(e)))\n\n @classmethod\n def make_grid(cls, images_folder, annotations_folder=None, masks_folder = None, n=4, rows=2, figsize = (10,10), fontsize=10, labels_map=None):\n import matplotlib\n matplotlib.use('WXAgg')\n import matplotlib.pylab as plt\n images_folder = Path(images_folder)\n assert images_folder.exists(), \"images folder not found\"\n if annotations_folder is None:\n annotations_folder = images_folder\n if masks_folder is None:\n masks_folder = images_folder\n # read files\n img_files = FileUtil.get_files(images_folder, [\".jpg\", \".jpeg\"])\n xml_files = FileUtil.get_files(annotations_folder, [\".xml\"])\n png_files = FileUtil.get_files(masks_folder, [\".png\"])\n files = img_files + xml_files + png_files\n files = sorted(files, key=lambda img: img.stem)\n files = [(img_name,list(img_files)) for img_name, img_files in itertools.groupby(files, key=lambda img: img.stem)]\n files = random.sample(files, k=n)\n cols = math.ceil(n / rows)\n if labels_map:\n labels_map = {k.title():v for k,v in labels_map.items()}\n\n # load annotations\n annotations_dict = {}\n for img_name, img_files in files:\n if len(img_files) >= 2:\n img_path = img_files[0]\n xml_path = img_files[1]\n mask_path = None\n if len(img_files) == 3:\n mask_path = img_files[2]\n annotations_dict[img_path] = cls.get_voc_annotations(img_path, xml_path, mask_path, labels_map)\n\n assert len(annotations_dict) > 0, \"Not annotations found\"\n labels = set([box.label for boxes in annotations_dict.values() for box in boxes ])\n labels_colors = dict(zip(labels, ColorUtil.rainbow_rgb(len(labels))))\n\n # show annotations\n fig = plt.figure(figsize=figsize)\n for i, (img_path, img_boxes) in enumerate(annotations_dict.items()):\n pil_image: Image = Image.open(img_path)\n image_numpy = np.asarray(pil_image).copy()\n # create figure\n ax = fig.add_subplot(rows, cols, i + 1)\n ax.set_axis_off()\n for box in img_boxes:\n label = box.label\n color = 
labels_colors[label]\n ax.add_patch(\n patches.Rectangle(\n (box.x1, box.y1),\n box.x2 - box.x1,\n box.y2 - box.y1,\n linewidth=3,\n edgecolor=np.asarray(color) /255,\n facecolor='none',\n fill=False\n ))\n if isinstance(box.mask, np.ndarray) and labels_map:\n cls.draw_mask(image_numpy, box.mask,color)\n # ax.text(\n # x=box[0][0] + 20,\n # y=box[0][1] + 20,\n # s=label,\n # color=\"white\",\n # fontsize=12,\n # bbox=dict(boxstyle=\"round\", facecolor=np.array(color) / 255, alpha=0.9))\n\n ax.imshow(image_numpy)\n plt.show()\n\n\n\n\n\n\n\n\n",
"import io\nimport itertools\nimport math\nimport os\nimport random\nfrom io import BytesIO\nfrom pathlib import Path\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nfrom matplotlib import patches\nfrom .misc_util import BoundingBox\nfrom .file_util import FileUtil\nfrom .color_util import ColorUtil\nfrom lxml import objectify\n\n\nclass VIUtil:\n\n @staticmethod\n def fig2buffer(fig):\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)\n buf.shape = w, h, 4\n buf = np.roll(buf, 3, axis=2)\n return buf\n\n @classmethod\n def fig2img(cls, fig):\n buf = cls.fig2buffer(fig)\n w, h, d = buf.shape\n img = Image.frombytes('RGBA', (w, h), buf.tostring())\n return np.asarray(img)\n\n @staticmethod\n def fig2numpy(fig, dpi=180):\n buf = BytesIO()\n fig.savefig(buf, format='png', dpi=dpi)\n buf.seek(0)\n img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)\n buf.close()\n img = cv2.imdecode(img_arr, 1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n @staticmethod\n def _imshow(img: np.ndarray):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.namedWindow('src', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('src', (800, 600))\n cv2.imshow('src', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n @staticmethod\n def draw_mask(image, mask, color=(255, 0, 0), alpha=0.4):\n assert image.dtype == np.uint8, '`image` not of type np.uint8'\n assert mask.dtype == np.uint8, '`mask` not of type np.uint8'\n if np.any(np.logical_and(mask != 1, mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n if image.shape[:2] != mask.shape:\n raise ValueError(\n 'The image has spatial dimensions %s but the mask has dimensions %s'\n % (image.shape[:2], mask.shape))\n pil_image = Image.fromarray(image)\n solid_color = np.expand_dims(np.ones_like(mask), axis=2) * np.reshape(\n list(color), [1, 1, 3])\n pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA'\n )\n pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')\n pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n np.copyto(image, np.array(pil_image.convert('RGB')))\n\n @classmethod\n def imshow(cls, img, boxes, alpha=0.3):\n labels = np.unique([box.label for box in boxes])\n colors_dict = ColorUtil.rainbow(len(labels))\n R = np.asarray(colors_dict['r'], dtype=np.float) / 255.0\n G = np.asarray(colors_dict['g'], dtype=np.float) / 255.0\n B = np.asarray(colors_dict['b'], dtype=np.float) / 255.0\n A = [1.0] * len(labels)\n palette = list(zip(B.tolist(), G.tolist(), R.tolist(), A))\n colors = {label: palette[i] for i, label in enumerate(labels)}\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(111, aspect='equal')\n for i, box in enumerate(boxes):\n c = colors[box.label]\n if isinstance(box.mask, np.ndarray):\n mask = box.mask\n rgb_color = tuple(map(lambda x: math.floor(x * 255), c[:-1]))\n cls.draw_mask(img, mask, color=rgb_color, alpha=alpha)\n label = '{} :{:.2f}'.format(box.label, box.score)\n ax.add_patch(patches.Rectangle((box.x1, box.y1), box.x2 - box.\n x1, box.y2 - box.y1, linewidth=3, edgecolor=c, facecolor=\n 'none', fill=False))\n ax.text(x=box.x1, y=box.y1, s=label, color='white', fontsize=12,\n bbox=dict(boxstyle='round', facecolor=colors[box.label],\n alpha=0.9))\n ax.set_axis_off()\n ax.imshow(img)\n plt.show()\n return fig\n\n @staticmethod\n def get_voc_annotations(image_path: str, xml_path: str=None, mask_path=\n None, 
labels_map=None):\n \"\"\"\n return the annotations of the image\n :param image_path: image path\n :return: a list with the bounding boxes\n \"\"\"\n try:\n image_path = Path(image_path)\n file_name = image_path.stem\n if xml_path is None:\n xml_path = str(image_path.parent.joinpath('{}.xml'.format(\n file_name)))\n mask_numpy = None\n try:\n if mask_path:\n with open(mask_path, 'rb') as fid:\n encoded_mask_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_mask_png)\n pil_mask = Image.open(encoded_png_io)\n if pil_mask.format != 'PNG':\n raise ValueError('Mask image format not PNG')\n mask_numpy = np.asarray(pil_mask)\n except Exception as ex:\n raise Exception('error reading the image')\n with open(xml_path) as f:\n xml = f.read()\n root = objectify.fromstring(xml)\n boxes = []\n for item in root.object:\n xmin = item.bndbox.xmin\n ymin = item.bndbox.ymin\n xmax = item.bndbox.xmax\n ymax = item.bndbox.ymax\n label = str(item.name)\n box = BoundingBox(x1=xmin, x2=xmax, y1=ymin, y2=ymax, label\n =label.title())\n boxes.append(box)\n if isinstance(mask_numpy, np.ndarray) and labels_map:\n if label in labels_map:\n class_id = labels_map[label]\n binary_mask = np.zeros_like(mask_numpy)\n binary_mask[mask_numpy == class_id] = 1\n box.mask = binary_mask\n boxes.append(box)\n return boxes\n except Exception as e:\n print('Error reading the image {}'.format(str(e)))\n\n @classmethod\n def make_grid(cls, images_folder, annotations_folder=None, masks_folder\n =None, n=4, rows=2, figsize=(10, 10), fontsize=10, labels_map=None):\n import matplotlib\n matplotlib.use('WXAgg')\n import matplotlib.pylab as plt\n images_folder = Path(images_folder)\n assert images_folder.exists(), 'images folder not found'\n if annotations_folder is None:\n annotations_folder = images_folder\n if masks_folder is None:\n masks_folder = images_folder\n img_files = FileUtil.get_files(images_folder, ['.jpg', '.jpeg'])\n xml_files = FileUtil.get_files(annotations_folder, ['.xml'])\n png_files = FileUtil.get_files(masks_folder, ['.png'])\n files = img_files + xml_files + png_files\n files = sorted(files, key=lambda img: img.stem)\n files = [(img_name, list(img_files)) for img_name, img_files in\n itertools.groupby(files, key=lambda img: img.stem)]\n files = random.sample(files, k=n)\n cols = math.ceil(n / rows)\n if labels_map:\n labels_map = {k.title(): v for k, v in labels_map.items()}\n annotations_dict = {}\n for img_name, img_files in files:\n if len(img_files) >= 2:\n img_path = img_files[0]\n xml_path = img_files[1]\n mask_path = None\n if len(img_files) == 3:\n mask_path = img_files[2]\n annotations_dict[img_path] = cls.get_voc_annotations(img_path,\n xml_path, mask_path, labels_map)\n assert len(annotations_dict) > 0, 'Not annotations found'\n labels = set([box.label for boxes in annotations_dict.values() for\n box in boxes])\n labels_colors = dict(zip(labels, ColorUtil.rainbow_rgb(len(labels))))\n fig = plt.figure(figsize=figsize)\n for i, (img_path, img_boxes) in enumerate(annotations_dict.items()):\n pil_image: Image = Image.open(img_path)\n image_numpy = np.asarray(pil_image).copy()\n ax = fig.add_subplot(rows, cols, i + 1)\n ax.set_axis_off()\n for box in img_boxes:\n label = box.label\n color = labels_colors[label]\n ax.add_patch(patches.Rectangle((box.x1, box.y1), box.x2 -\n box.x1, box.y2 - box.y1, linewidth=3, edgecolor=np.\n asarray(color) / 255, facecolor='none', fill=False))\n if isinstance(box.mask, np.ndarray) and labels_map:\n cls.draw_mask(image_numpy, box.mask, color)\n ax.imshow(image_numpy)\n 
plt.show()\n",
"<import token>\n\n\nclass VIUtil:\n\n @staticmethod\n def fig2buffer(fig):\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)\n buf.shape = w, h, 4\n buf = np.roll(buf, 3, axis=2)\n return buf\n\n @classmethod\n def fig2img(cls, fig):\n buf = cls.fig2buffer(fig)\n w, h, d = buf.shape\n img = Image.frombytes('RGBA', (w, h), buf.tostring())\n return np.asarray(img)\n\n @staticmethod\n def fig2numpy(fig, dpi=180):\n buf = BytesIO()\n fig.savefig(buf, format='png', dpi=dpi)\n buf.seek(0)\n img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)\n buf.close()\n img = cv2.imdecode(img_arr, 1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n @staticmethod\n def _imshow(img: np.ndarray):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.namedWindow('src', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('src', (800, 600))\n cv2.imshow('src', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n @staticmethod\n def draw_mask(image, mask, color=(255, 0, 0), alpha=0.4):\n assert image.dtype == np.uint8, '`image` not of type np.uint8'\n assert mask.dtype == np.uint8, '`mask` not of type np.uint8'\n if np.any(np.logical_and(mask != 1, mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n if image.shape[:2] != mask.shape:\n raise ValueError(\n 'The image has spatial dimensions %s but the mask has dimensions %s'\n % (image.shape[:2], mask.shape))\n pil_image = Image.fromarray(image)\n solid_color = np.expand_dims(np.ones_like(mask), axis=2) * np.reshape(\n list(color), [1, 1, 3])\n pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA'\n )\n pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')\n pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n np.copyto(image, np.array(pil_image.convert('RGB')))\n\n @classmethod\n def imshow(cls, img, boxes, alpha=0.3):\n labels = np.unique([box.label for box in boxes])\n colors_dict = ColorUtil.rainbow(len(labels))\n R = np.asarray(colors_dict['r'], dtype=np.float) / 255.0\n G = np.asarray(colors_dict['g'], dtype=np.float) / 255.0\n B = np.asarray(colors_dict['b'], dtype=np.float) / 255.0\n A = [1.0] * len(labels)\n palette = list(zip(B.tolist(), G.tolist(), R.tolist(), A))\n colors = {label: palette[i] for i, label in enumerate(labels)}\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(111, aspect='equal')\n for i, box in enumerate(boxes):\n c = colors[box.label]\n if isinstance(box.mask, np.ndarray):\n mask = box.mask\n rgb_color = tuple(map(lambda x: math.floor(x * 255), c[:-1]))\n cls.draw_mask(img, mask, color=rgb_color, alpha=alpha)\n label = '{} :{:.2f}'.format(box.label, box.score)\n ax.add_patch(patches.Rectangle((box.x1, box.y1), box.x2 - box.\n x1, box.y2 - box.y1, linewidth=3, edgecolor=c, facecolor=\n 'none', fill=False))\n ax.text(x=box.x1, y=box.y1, s=label, color='white', fontsize=12,\n bbox=dict(boxstyle='round', facecolor=colors[box.label],\n alpha=0.9))\n ax.set_axis_off()\n ax.imshow(img)\n plt.show()\n return fig\n\n @staticmethod\n def get_voc_annotations(image_path: str, xml_path: str=None, mask_path=\n None, labels_map=None):\n \"\"\"\n return the annotations of the image\n :param image_path: image path\n :return: a list with the bounding boxes\n \"\"\"\n try:\n image_path = Path(image_path)\n file_name = image_path.stem\n if xml_path is None:\n xml_path = str(image_path.parent.joinpath('{}.xml'.format(\n file_name)))\n mask_numpy = None\n try:\n if mask_path:\n with open(mask_path, 'rb') 
as fid:\n encoded_mask_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_mask_png)\n pil_mask = Image.open(encoded_png_io)\n if pil_mask.format != 'PNG':\n raise ValueError('Mask image format not PNG')\n mask_numpy = np.asarray(pil_mask)\n except Exception as ex:\n raise Exception('error reading the image')\n with open(xml_path) as f:\n xml = f.read()\n root = objectify.fromstring(xml)\n boxes = []\n for item in root.object:\n xmin = item.bndbox.xmin\n ymin = item.bndbox.ymin\n xmax = item.bndbox.xmax\n ymax = item.bndbox.ymax\n label = str(item.name)\n box = BoundingBox(x1=xmin, x2=xmax, y1=ymin, y2=ymax, label\n =label.title())\n boxes.append(box)\n if isinstance(mask_numpy, np.ndarray) and labels_map:\n if label in labels_map:\n class_id = labels_map[label]\n binary_mask = np.zeros_like(mask_numpy)\n binary_mask[mask_numpy == class_id] = 1\n box.mask = binary_mask\n boxes.append(box)\n return boxes\n except Exception as e:\n print('Error reading the image {}'.format(str(e)))\n\n @classmethod\n def make_grid(cls, images_folder, annotations_folder=None, masks_folder\n =None, n=4, rows=2, figsize=(10, 10), fontsize=10, labels_map=None):\n import matplotlib\n matplotlib.use('WXAgg')\n import matplotlib.pylab as plt\n images_folder = Path(images_folder)\n assert images_folder.exists(), 'images folder not found'\n if annotations_folder is None:\n annotations_folder = images_folder\n if masks_folder is None:\n masks_folder = images_folder\n img_files = FileUtil.get_files(images_folder, ['.jpg', '.jpeg'])\n xml_files = FileUtil.get_files(annotations_folder, ['.xml'])\n png_files = FileUtil.get_files(masks_folder, ['.png'])\n files = img_files + xml_files + png_files\n files = sorted(files, key=lambda img: img.stem)\n files = [(img_name, list(img_files)) for img_name, img_files in\n itertools.groupby(files, key=lambda img: img.stem)]\n files = random.sample(files, k=n)\n cols = math.ceil(n / rows)\n if labels_map:\n labels_map = {k.title(): v for k, v in labels_map.items()}\n annotations_dict = {}\n for img_name, img_files in files:\n if len(img_files) >= 2:\n img_path = img_files[0]\n xml_path = img_files[1]\n mask_path = None\n if len(img_files) == 3:\n mask_path = img_files[2]\n annotations_dict[img_path] = cls.get_voc_annotations(img_path,\n xml_path, mask_path, labels_map)\n assert len(annotations_dict) > 0, 'Not annotations found'\n labels = set([box.label for boxes in annotations_dict.values() for\n box in boxes])\n labels_colors = dict(zip(labels, ColorUtil.rainbow_rgb(len(labels))))\n fig = plt.figure(figsize=figsize)\n for i, (img_path, img_boxes) in enumerate(annotations_dict.items()):\n pil_image: Image = Image.open(img_path)\n image_numpy = np.asarray(pil_image).copy()\n ax = fig.add_subplot(rows, cols, i + 1)\n ax.set_axis_off()\n for box in img_boxes:\n label = box.label\n color = labels_colors[label]\n ax.add_patch(patches.Rectangle((box.x1, box.y1), box.x2 -\n box.x1, box.y2 - box.y1, linewidth=3, edgecolor=np.\n asarray(color) / 255, facecolor='none', fill=False))\n if isinstance(box.mask, np.ndarray) and labels_map:\n cls.draw_mask(image_numpy, box.mask, color)\n ax.imshow(image_numpy)\n plt.show()\n",
"<import token>\n\n\nclass VIUtil:\n\n @staticmethod\n def fig2buffer(fig):\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)\n buf.shape = w, h, 4\n buf = np.roll(buf, 3, axis=2)\n return buf\n\n @classmethod\n def fig2img(cls, fig):\n buf = cls.fig2buffer(fig)\n w, h, d = buf.shape\n img = Image.frombytes('RGBA', (w, h), buf.tostring())\n return np.asarray(img)\n\n @staticmethod\n def fig2numpy(fig, dpi=180):\n buf = BytesIO()\n fig.savefig(buf, format='png', dpi=dpi)\n buf.seek(0)\n img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)\n buf.close()\n img = cv2.imdecode(img_arr, 1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n @staticmethod\n def _imshow(img: np.ndarray):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.namedWindow('src', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('src', (800, 600))\n cv2.imshow('src', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n @staticmethod\n def draw_mask(image, mask, color=(255, 0, 0), alpha=0.4):\n assert image.dtype == np.uint8, '`image` not of type np.uint8'\n assert mask.dtype == np.uint8, '`mask` not of type np.uint8'\n if np.any(np.logical_and(mask != 1, mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n if image.shape[:2] != mask.shape:\n raise ValueError(\n 'The image has spatial dimensions %s but the mask has dimensions %s'\n % (image.shape[:2], mask.shape))\n pil_image = Image.fromarray(image)\n solid_color = np.expand_dims(np.ones_like(mask), axis=2) * np.reshape(\n list(color), [1, 1, 3])\n pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA'\n )\n pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')\n pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n np.copyto(image, np.array(pil_image.convert('RGB')))\n <function token>\n\n @staticmethod\n def get_voc_annotations(image_path: str, xml_path: str=None, mask_path=\n None, labels_map=None):\n \"\"\"\n return the annotations of the image\n :param image_path: image path\n :return: a list with the bounding boxes\n \"\"\"\n try:\n image_path = Path(image_path)\n file_name = image_path.stem\n if xml_path is None:\n xml_path = str(image_path.parent.joinpath('{}.xml'.format(\n file_name)))\n mask_numpy = None\n try:\n if mask_path:\n with open(mask_path, 'rb') as fid:\n encoded_mask_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_mask_png)\n pil_mask = Image.open(encoded_png_io)\n if pil_mask.format != 'PNG':\n raise ValueError('Mask image format not PNG')\n mask_numpy = np.asarray(pil_mask)\n except Exception as ex:\n raise Exception('error reading the image')\n with open(xml_path) as f:\n xml = f.read()\n root = objectify.fromstring(xml)\n boxes = []\n for item in root.object:\n xmin = item.bndbox.xmin\n ymin = item.bndbox.ymin\n xmax = item.bndbox.xmax\n ymax = item.bndbox.ymax\n label = str(item.name)\n box = BoundingBox(x1=xmin, x2=xmax, y1=ymin, y2=ymax, label\n =label.title())\n boxes.append(box)\n if isinstance(mask_numpy, np.ndarray) and labels_map:\n if label in labels_map:\n class_id = labels_map[label]\n binary_mask = np.zeros_like(mask_numpy)\n binary_mask[mask_numpy == class_id] = 1\n box.mask = binary_mask\n boxes.append(box)\n return boxes\n except Exception as e:\n print('Error reading the image {}'.format(str(e)))\n\n @classmethod\n def make_grid(cls, images_folder, annotations_folder=None, masks_folder\n =None, n=4, rows=2, figsize=(10, 10), fontsize=10, labels_map=None):\n import matplotlib\n 
matplotlib.use('WXAgg')\n import matplotlib.pylab as plt\n images_folder = Path(images_folder)\n assert images_folder.exists(), 'images folder not found'\n if annotations_folder is None:\n annotations_folder = images_folder\n if masks_folder is None:\n masks_folder = images_folder\n img_files = FileUtil.get_files(images_folder, ['.jpg', '.jpeg'])\n xml_files = FileUtil.get_files(annotations_folder, ['.xml'])\n png_files = FileUtil.get_files(masks_folder, ['.png'])\n files = img_files + xml_files + png_files\n files = sorted(files, key=lambda img: img.stem)\n files = [(img_name, list(img_files)) for img_name, img_files in\n itertools.groupby(files, key=lambda img: img.stem)]\n files = random.sample(files, k=n)\n cols = math.ceil(n / rows)\n if labels_map:\n labels_map = {k.title(): v for k, v in labels_map.items()}\n annotations_dict = {}\n for img_name, img_files in files:\n if len(img_files) >= 2:\n img_path = img_files[0]\n xml_path = img_files[1]\n mask_path = None\n if len(img_files) == 3:\n mask_path = img_files[2]\n annotations_dict[img_path] = cls.get_voc_annotations(img_path,\n xml_path, mask_path, labels_map)\n assert len(annotations_dict) > 0, 'Not annotations found'\n labels = set([box.label for boxes in annotations_dict.values() for\n box in boxes])\n labels_colors = dict(zip(labels, ColorUtil.rainbow_rgb(len(labels))))\n fig = plt.figure(figsize=figsize)\n for i, (img_path, img_boxes) in enumerate(annotations_dict.items()):\n pil_image: Image = Image.open(img_path)\n image_numpy = np.asarray(pil_image).copy()\n ax = fig.add_subplot(rows, cols, i + 1)\n ax.set_axis_off()\n for box in img_boxes:\n label = box.label\n color = labels_colors[label]\n ax.add_patch(patches.Rectangle((box.x1, box.y1), box.x2 -\n box.x1, box.y2 - box.y1, linewidth=3, edgecolor=np.\n asarray(color) / 255, facecolor='none', fill=False))\n if isinstance(box.mask, np.ndarray) and labels_map:\n cls.draw_mask(image_numpy, box.mask, color)\n ax.imshow(image_numpy)\n plt.show()\n",
"<import token>\n\n\nclass VIUtil:\n\n @staticmethod\n def fig2buffer(fig):\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)\n buf.shape = w, h, 4\n buf = np.roll(buf, 3, axis=2)\n return buf\n\n @classmethod\n def fig2img(cls, fig):\n buf = cls.fig2buffer(fig)\n w, h, d = buf.shape\n img = Image.frombytes('RGBA', (w, h), buf.tostring())\n return np.asarray(img)\n\n @staticmethod\n def fig2numpy(fig, dpi=180):\n buf = BytesIO()\n fig.savefig(buf, format='png', dpi=dpi)\n buf.seek(0)\n img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)\n buf.close()\n img = cv2.imdecode(img_arr, 1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n @staticmethod\n def _imshow(img: np.ndarray):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.namedWindow('src', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('src', (800, 600))\n cv2.imshow('src', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n @staticmethod\n def draw_mask(image, mask, color=(255, 0, 0), alpha=0.4):\n assert image.dtype == np.uint8, '`image` not of type np.uint8'\n assert mask.dtype == np.uint8, '`mask` not of type np.uint8'\n if np.any(np.logical_and(mask != 1, mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n if image.shape[:2] != mask.shape:\n raise ValueError(\n 'The image has spatial dimensions %s but the mask has dimensions %s'\n % (image.shape[:2], mask.shape))\n pil_image = Image.fromarray(image)\n solid_color = np.expand_dims(np.ones_like(mask), axis=2) * np.reshape(\n list(color), [1, 1, 3])\n pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA'\n )\n pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')\n pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n np.copyto(image, np.array(pil_image.convert('RGB')))\n <function token>\n\n @staticmethod\n def get_voc_annotations(image_path: str, xml_path: str=None, mask_path=\n None, labels_map=None):\n \"\"\"\n return the annotations of the image\n :param image_path: image path\n :return: a list with the bounding boxes\n \"\"\"\n try:\n image_path = Path(image_path)\n file_name = image_path.stem\n if xml_path is None:\n xml_path = str(image_path.parent.joinpath('{}.xml'.format(\n file_name)))\n mask_numpy = None\n try:\n if mask_path:\n with open(mask_path, 'rb') as fid:\n encoded_mask_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_mask_png)\n pil_mask = Image.open(encoded_png_io)\n if pil_mask.format != 'PNG':\n raise ValueError('Mask image format not PNG')\n mask_numpy = np.asarray(pil_mask)\n except Exception as ex:\n raise Exception('error reading the image')\n with open(xml_path) as f:\n xml = f.read()\n root = objectify.fromstring(xml)\n boxes = []\n for item in root.object:\n xmin = item.bndbox.xmin\n ymin = item.bndbox.ymin\n xmax = item.bndbox.xmax\n ymax = item.bndbox.ymax\n label = str(item.name)\n box = BoundingBox(x1=xmin, x2=xmax, y1=ymin, y2=ymax, label\n =label.title())\n boxes.append(box)\n if isinstance(mask_numpy, np.ndarray) and labels_map:\n if label in labels_map:\n class_id = labels_map[label]\n binary_mask = np.zeros_like(mask_numpy)\n binary_mask[mask_numpy == class_id] = 1\n box.mask = binary_mask\n boxes.append(box)\n return boxes\n except Exception as e:\n print('Error reading the image {}'.format(str(e)))\n <function token>\n",
"<import token>\n\n\nclass VIUtil:\n\n @staticmethod\n def fig2buffer(fig):\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)\n buf.shape = w, h, 4\n buf = np.roll(buf, 3, axis=2)\n return buf\n\n @classmethod\n def fig2img(cls, fig):\n buf = cls.fig2buffer(fig)\n w, h, d = buf.shape\n img = Image.frombytes('RGBA', (w, h), buf.tostring())\n return np.asarray(img)\n <function token>\n\n @staticmethod\n def _imshow(img: np.ndarray):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.namedWindow('src', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('src', (800, 600))\n cv2.imshow('src', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n @staticmethod\n def draw_mask(image, mask, color=(255, 0, 0), alpha=0.4):\n assert image.dtype == np.uint8, '`image` not of type np.uint8'\n assert mask.dtype == np.uint8, '`mask` not of type np.uint8'\n if np.any(np.logical_and(mask != 1, mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n if image.shape[:2] != mask.shape:\n raise ValueError(\n 'The image has spatial dimensions %s but the mask has dimensions %s'\n % (image.shape[:2], mask.shape))\n pil_image = Image.fromarray(image)\n solid_color = np.expand_dims(np.ones_like(mask), axis=2) * np.reshape(\n list(color), [1, 1, 3])\n pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA'\n )\n pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')\n pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n np.copyto(image, np.array(pil_image.convert('RGB')))\n <function token>\n\n @staticmethod\n def get_voc_annotations(image_path: str, xml_path: str=None, mask_path=\n None, labels_map=None):\n \"\"\"\n return the annotations of the image\n :param image_path: image path\n :return: a list with the bounding boxes\n \"\"\"\n try:\n image_path = Path(image_path)\n file_name = image_path.stem\n if xml_path is None:\n xml_path = str(image_path.parent.joinpath('{}.xml'.format(\n file_name)))\n mask_numpy = None\n try:\n if mask_path:\n with open(mask_path, 'rb') as fid:\n encoded_mask_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_mask_png)\n pil_mask = Image.open(encoded_png_io)\n if pil_mask.format != 'PNG':\n raise ValueError('Mask image format not PNG')\n mask_numpy = np.asarray(pil_mask)\n except Exception as ex:\n raise Exception('error reading the image')\n with open(xml_path) as f:\n xml = f.read()\n root = objectify.fromstring(xml)\n boxes = []\n for item in root.object:\n xmin = item.bndbox.xmin\n ymin = item.bndbox.ymin\n xmax = item.bndbox.xmax\n ymax = item.bndbox.ymax\n label = str(item.name)\n box = BoundingBox(x1=xmin, x2=xmax, y1=ymin, y2=ymax, label\n =label.title())\n boxes.append(box)\n if isinstance(mask_numpy, np.ndarray) and labels_map:\n if label in labels_map:\n class_id = labels_map[label]\n binary_mask = np.zeros_like(mask_numpy)\n binary_mask[mask_numpy == class_id] = 1\n box.mask = binary_mask\n boxes.append(box)\n return boxes\n except Exception as e:\n print('Error reading the image {}'.format(str(e)))\n <function token>\n",
"<import token>\n\n\nclass VIUtil:\n\n @staticmethod\n def fig2buffer(fig):\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)\n buf.shape = w, h, 4\n buf = np.roll(buf, 3, axis=2)\n return buf\n\n @classmethod\n def fig2img(cls, fig):\n buf = cls.fig2buffer(fig)\n w, h, d = buf.shape\n img = Image.frombytes('RGBA', (w, h), buf.tostring())\n return np.asarray(img)\n <function token>\n\n @staticmethod\n def _imshow(img: np.ndarray):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.namedWindow('src', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('src', (800, 600))\n cv2.imshow('src', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n @staticmethod\n def draw_mask(image, mask, color=(255, 0, 0), alpha=0.4):\n assert image.dtype == np.uint8, '`image` not of type np.uint8'\n assert mask.dtype == np.uint8, '`mask` not of type np.uint8'\n if np.any(np.logical_and(mask != 1, mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n if image.shape[:2] != mask.shape:\n raise ValueError(\n 'The image has spatial dimensions %s but the mask has dimensions %s'\n % (image.shape[:2], mask.shape))\n pil_image = Image.fromarray(image)\n solid_color = np.expand_dims(np.ones_like(mask), axis=2) * np.reshape(\n list(color), [1, 1, 3])\n pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA'\n )\n pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')\n pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n np.copyto(image, np.array(pil_image.convert('RGB')))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass VIUtil:\n <function token>\n\n @classmethod\n def fig2img(cls, fig):\n buf = cls.fig2buffer(fig)\n w, h, d = buf.shape\n img = Image.frombytes('RGBA', (w, h), buf.tostring())\n return np.asarray(img)\n <function token>\n\n @staticmethod\n def _imshow(img: np.ndarray):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.namedWindow('src', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('src', (800, 600))\n cv2.imshow('src', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n @staticmethod\n def draw_mask(image, mask, color=(255, 0, 0), alpha=0.4):\n assert image.dtype == np.uint8, '`image` not of type np.uint8'\n assert mask.dtype == np.uint8, '`mask` not of type np.uint8'\n if np.any(np.logical_and(mask != 1, mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n if image.shape[:2] != mask.shape:\n raise ValueError(\n 'The image has spatial dimensions %s but the mask has dimensions %s'\n % (image.shape[:2], mask.shape))\n pil_image = Image.fromarray(image)\n solid_color = np.expand_dims(np.ones_like(mask), axis=2) * np.reshape(\n list(color), [1, 1, 3])\n pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA'\n )\n pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')\n pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n np.copyto(image, np.array(pil_image.convert('RGB')))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass VIUtil:\n <function token>\n\n @classmethod\n def fig2img(cls, fig):\n buf = cls.fig2buffer(fig)\n w, h, d = buf.shape\n img = Image.frombytes('RGBA', (w, h), buf.tostring())\n return np.asarray(img)\n <function token>\n <function token>\n\n @staticmethod\n def draw_mask(image, mask, color=(255, 0, 0), alpha=0.4):\n assert image.dtype == np.uint8, '`image` not of type np.uint8'\n assert mask.dtype == np.uint8, '`mask` not of type np.uint8'\n if np.any(np.logical_and(mask != 1, mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n if image.shape[:2] != mask.shape:\n raise ValueError(\n 'The image has spatial dimensions %s but the mask has dimensions %s'\n % (image.shape[:2], mask.shape))\n pil_image = Image.fromarray(image)\n solid_color = np.expand_dims(np.ones_like(mask), axis=2) * np.reshape(\n list(color), [1, 1, 3])\n pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA'\n )\n pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')\n pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n np.copyto(image, np.array(pil_image.convert('RGB')))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass VIUtil:\n <function token>\n <function token>\n <function token>\n <function token>\n\n @staticmethod\n def draw_mask(image, mask, color=(255, 0, 0), alpha=0.4):\n assert image.dtype == np.uint8, '`image` not of type np.uint8'\n assert mask.dtype == np.uint8, '`mask` not of type np.uint8'\n if np.any(np.logical_and(mask != 1, mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n if image.shape[:2] != mask.shape:\n raise ValueError(\n 'The image has spatial dimensions %s but the mask has dimensions %s'\n % (image.shape[:2], mask.shape))\n pil_image = Image.fromarray(image)\n solid_color = np.expand_dims(np.ones_like(mask), axis=2) * np.reshape(\n list(color), [1, 1, 3])\n pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA'\n )\n pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')\n pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n np.copyto(image, np.array(pil_image.convert('RGB')))\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass VIUtil:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,494 |
e36c407500aaeedc8f159bfa694bbbdb051b76db
|
# Generated by Django 2.2.1 on 2019-05-20 15:18
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Artwork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(auto_now=True)),
('input_image', models.ImageField(upload_to='%Y/%m/%d/')),
('style_image', models.ImageField(upload_to='%Y/%m/%d/')),
('colored_image', models.ImageField(blank=True, default=None, null=True, upload_to='%Y/%m/%d/')),
('style_transferred_image', models.ImageField(blank=True, default=None, null=True, upload_to='neural_style/%Y/%m/%d/')),
('visually_similar_image', models.ImageField(blank=True, default=None, null=True, upload_to='%Y/%m/%d/')),
('pixel_sorted_image', models.ImageField(blank=True, default=None, null=True, upload_to='%Y/%m/%d/')),
('final_image', models.ImageField(blank=True, default=None, null=True, upload_to='finals/%Y/%m/%d/')),
('has_failed', models.BooleanField(default=False)),
],
),
]
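# A typical way to apply this migration (assuming a standard Django project
# layout; the app label below is a placeholder):
#   python manage.py migrate <app_label>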
|
[
"# Generated by Django 2.2.1 on 2019-05-20 15:18\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Artwork',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('create_date', models.DateTimeField(auto_now_add=True)),\n ('update_date', models.DateTimeField(auto_now=True)),\n ('input_image', models.ImageField(upload_to='%Y/%m/%d/')),\n ('style_image', models.ImageField(upload_to='%Y/%m/%d/')),\n ('colored_image', models.ImageField(blank=True, default=None, null=True, upload_to='%Y/%m/%d/')),\n ('style_transferred_image', models.ImageField(blank=True, default=None, null=True, upload_to='neural_style/%Y/%m/%d/')),\n ('visually_similar_image', models.ImageField(blank=True, default=None, null=True, upload_to='%Y/%m/%d/')),\n ('pixel_sorted_image', models.ImageField(blank=True, default=None, null=True, upload_to='%Y/%m/%d/')),\n ('final_image', models.ImageField(blank=True, default=None, null=True, upload_to='finals/%Y/%m/%d/')),\n ('has_failed', models.BooleanField(default=False)),\n ],\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Artwork', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('create_date', models.DateTimeField(\n auto_now_add=True)), ('update_date', models.DateTimeField(auto_now=\n True)), ('input_image', models.ImageField(upload_to='%Y/%m/%d/')),\n ('style_image', models.ImageField(upload_to='%Y/%m/%d/')), (\n 'colored_image', models.ImageField(blank=True, default=None, null=\n True, upload_to='%Y/%m/%d/')), ('style_transferred_image', models.\n ImageField(blank=True, default=None, null=True, upload_to=\n 'neural_style/%Y/%m/%d/')), ('visually_similar_image', models.\n ImageField(blank=True, default=None, null=True, upload_to=\n '%Y/%m/%d/')), ('pixel_sorted_image', models.ImageField(blank=True,\n default=None, null=True, upload_to='%Y/%m/%d/')), ('final_image',\n models.ImageField(blank=True, default=None, null=True, upload_to=\n 'finals/%Y/%m/%d/')), ('has_failed', models.BooleanField(default=\n False))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Artwork', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('create_date', models.DateTimeField(\n auto_now_add=True)), ('update_date', models.DateTimeField(auto_now=\n True)), ('input_image', models.ImageField(upload_to='%Y/%m/%d/')),\n ('style_image', models.ImageField(upload_to='%Y/%m/%d/')), (\n 'colored_image', models.ImageField(blank=True, default=None, null=\n True, upload_to='%Y/%m/%d/')), ('style_transferred_image', models.\n ImageField(blank=True, default=None, null=True, upload_to=\n 'neural_style/%Y/%m/%d/')), ('visually_similar_image', models.\n ImageField(blank=True, default=None, null=True, upload_to=\n '%Y/%m/%d/')), ('pixel_sorted_image', models.ImageField(blank=True,\n default=None, null=True, upload_to='%Y/%m/%d/')), ('final_image',\n models.ImageField(blank=True, default=None, null=True, upload_to=\n 'finals/%Y/%m/%d/')), ('has_failed', models.BooleanField(default=\n False))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,495 |
bb74227cbac39f99f40adcc4c08f279c7ec80c31
|
import sqlite3
# Connect to the database
conn = sqlite3.connect('CustomerOrders.db')
# Create a cursor
c = conn.cursor()
# Inserts a sample order into the orders table; the values below are
# illustrative placeholders only and assume matching customer/address/product rows exist
c.execute("""INSERT INTO tblOrders (CustomerID, AddressID, ProductID, ProductName, Quantity, TotalCost, OrderStatus)
VALUES (1, 1, 1, 'Dairy Milk Bar', 2, 4.50, 'Pending')""")
# Confirms record insertion
print('We have inserted', c.rowcount, 'order(s) into the table.')
# Defines address information to be inserted
Addresses = [
("Cadbury World","Linden Rd", "Birmingham", "B30 1JR", "United Kingdom"),
("GCHQ Cheltenham","Hubble Rd", "Cheltenham", "GL51 0EX","United Kingdom")
]
# Inserts address information into the table
c.executemany('INSERT INTO tblAddress (Street, City, County, Postcode, Country) VALUES (?,?,?,?,?);',Addresses)
# Verifies the data has been added
print("\nAdded new records successfully")
# Commit our changes
conn.commit()
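# A minimal verification sketch: read the inserted address rows back and close
# the connection so the database file is released cleanly.
for row in c.execute('SELECT * FROM tblAddress;'):
    print(row)
conn.close()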
|
[
"import sqlite3\n\n#connect to the database\nconn = sqlite3.connect('CustomerOrders.db')\n\n# Create a cursor\nc = conn.cursor()\n\n#Inserts addresses into the address table, starting the automatic numbering process for addresses\nc.execute(\"\"\"INSERT INTO tblOrders (CustomerID,AddressID,ProductID, ProductName, Quantity, TotalCost,OrderStatus) \nVALUES ()\"\"\")\n\n#Confrims record insertion\nprint('We have inserted', c.rowcount, 'address(es) into the table.')\n\n#Defines address information to be inserted\nAddresses = [\n (\"Cadbury World\",\"Linden Rd\", \"Birmingham\", \"B30 1JR\", \"United Kingdom\"),\n (\"GCHQ Cheltenham\",\"Hubble Rd\", \"Cheltenham\", \"GL51 0EX\",\"United Kingdom\")\n ]\n\n#Inserts address information into table\nc.executemany('INSERT INTO tblAddress (Street, City, County, Postcode, Country) VALUES (?,?,?,?,?);',Addresses)\n\n#Verifies the data has been added\nprint(\"\\nAdded new records successfully\")\n\n#Commit our changes\nconn.commit()",
"import sqlite3\nconn = sqlite3.connect('CustomerOrders.db')\nc = conn.cursor()\nc.execute(\n \"\"\"INSERT INTO tblOrders (CustomerID,AddressID,ProductID, ProductName, Quantity, TotalCost,OrderStatus) \nVALUES ()\"\"\"\n )\nprint('We have inserted', c.rowcount, 'address(es) into the table.')\nAddresses = [('Cadbury World', 'Linden Rd', 'Birmingham', 'B30 1JR',\n 'United Kingdom'), ('GCHQ Cheltenham', 'Hubble Rd', 'Cheltenham',\n 'GL51 0EX', 'United Kingdom')]\nc.executemany(\n 'INSERT INTO tblAddress (Street, City, County, Postcode, Country) VALUES (?,?,?,?,?);'\n , Addresses)\nprint(\"\"\"\nAdded new records successfully\"\"\")\nconn.commit()\n",
"<import token>\nconn = sqlite3.connect('CustomerOrders.db')\nc = conn.cursor()\nc.execute(\n \"\"\"INSERT INTO tblOrders (CustomerID,AddressID,ProductID, ProductName, Quantity, TotalCost,OrderStatus) \nVALUES ()\"\"\"\n )\nprint('We have inserted', c.rowcount, 'address(es) into the table.')\nAddresses = [('Cadbury World', 'Linden Rd', 'Birmingham', 'B30 1JR',\n 'United Kingdom'), ('GCHQ Cheltenham', 'Hubble Rd', 'Cheltenham',\n 'GL51 0EX', 'United Kingdom')]\nc.executemany(\n 'INSERT INTO tblAddress (Street, City, County, Postcode, Country) VALUES (?,?,?,?,?);'\n , Addresses)\nprint(\"\"\"\nAdded new records successfully\"\"\")\nconn.commit()\n",
"<import token>\n<assignment token>\nc.execute(\n \"\"\"INSERT INTO tblOrders (CustomerID,AddressID,ProductID, ProductName, Quantity, TotalCost,OrderStatus) \nVALUES ()\"\"\"\n )\nprint('We have inserted', c.rowcount, 'address(es) into the table.')\n<assignment token>\nc.executemany(\n 'INSERT INTO tblAddress (Street, City, County, Postcode, Country) VALUES (?,?,?,?,?);'\n , Addresses)\nprint(\"\"\"\nAdded new records successfully\"\"\")\nconn.commit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,496 |
6b8f3f78029ae6d5ddfa661f06580149ba15133e
|
import json
class BaseDAO(object):
""" Potential Base class for API DAOs."""
def __init__(self, DB):
self.DB_FILE = DB
with open(DB) as db:
self.db = json.load(db)
def _write(self, data):
"""Write updated data list as in-mem list and to file.
Args:
data (dict): Object to write to db.
"""
self.db.append(data)
with open(self.DB_FILE, 'w') as outfile:
json.dump(self.db, outfile)
def get(self):
return self.db
def _create_id(self):
raise NotImplementedError
def create(self, data):
obj = data
obj['id'] = self._create_id()
self._write(obj)
return obj
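# A minimal usage sketch (hypothetical subclass): BaseDAO leaves _create_id
# abstract, so a concrete DAO only has to decide how ids are generated.
class UserDAO(BaseDAO):
    def _create_id(self):
        # naive sequential id; a real implementation might use uuid.uuid4()
        return len(self.db) + 1
# dao = UserDAO('users.json')            # expects a JSON file containing a list
# new_user = dao.create({'name': 'Ada'})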
|
[
"import json\n\n\nclass BaseDAO(object):\n \"\"\" Potential Base class for API DAOs.\"\"\"\n\n def __init__(self, DB):\n self.DB_FILE = DB\n with open(DB) as db:\n self.db = json.load(db)\n\n def _write(self, data):\n \"\"\"Write updated data list as in-mem list and to file.\n\n Args:\n data (dict): Object to write to db.\n \"\"\"\n self.db.append(data)\n\n with open(self.DB_FILE, 'w') as outfile:\n json.dump(self.db, outfile)\n\n def get(self):\n return self.db\n\n def _create_id(self):\n raise NotImplementedError\n\n def create(self, data):\n obj = data\n obj['id'] = self._create_id()\n self._write(obj)\n return obj\n",
"import json\n\n\nclass BaseDAO(object):\n \"\"\" Potential Base class for API DAOs.\"\"\"\n\n def __init__(self, DB):\n self.DB_FILE = DB\n with open(DB) as db:\n self.db = json.load(db)\n\n def _write(self, data):\n \"\"\"Write updated data list as in-mem list and to file.\n\n Args:\n data (dict): Object to write to db.\n \"\"\"\n self.db.append(data)\n with open(self.DB_FILE, 'w') as outfile:\n json.dump(self.db, outfile)\n\n def get(self):\n return self.db\n\n def _create_id(self):\n raise NotImplementedError\n\n def create(self, data):\n obj = data\n obj['id'] = self._create_id()\n self._write(obj)\n return obj\n",
"<import token>\n\n\nclass BaseDAO(object):\n \"\"\" Potential Base class for API DAOs.\"\"\"\n\n def __init__(self, DB):\n self.DB_FILE = DB\n with open(DB) as db:\n self.db = json.load(db)\n\n def _write(self, data):\n \"\"\"Write updated data list as in-mem list and to file.\n\n Args:\n data (dict): Object to write to db.\n \"\"\"\n self.db.append(data)\n with open(self.DB_FILE, 'w') as outfile:\n json.dump(self.db, outfile)\n\n def get(self):\n return self.db\n\n def _create_id(self):\n raise NotImplementedError\n\n def create(self, data):\n obj = data\n obj['id'] = self._create_id()\n self._write(obj)\n return obj\n",
"<import token>\n\n\nclass BaseDAO(object):\n <docstring token>\n\n def __init__(self, DB):\n self.DB_FILE = DB\n with open(DB) as db:\n self.db = json.load(db)\n\n def _write(self, data):\n \"\"\"Write updated data list as in-mem list and to file.\n\n Args:\n data (dict): Object to write to db.\n \"\"\"\n self.db.append(data)\n with open(self.DB_FILE, 'w') as outfile:\n json.dump(self.db, outfile)\n\n def get(self):\n return self.db\n\n def _create_id(self):\n raise NotImplementedError\n\n def create(self, data):\n obj = data\n obj['id'] = self._create_id()\n self._write(obj)\n return obj\n",
"<import token>\n\n\nclass BaseDAO(object):\n <docstring token>\n <function token>\n\n def _write(self, data):\n \"\"\"Write updated data list as in-mem list and to file.\n\n Args:\n data (dict): Object to write to db.\n \"\"\"\n self.db.append(data)\n with open(self.DB_FILE, 'w') as outfile:\n json.dump(self.db, outfile)\n\n def get(self):\n return self.db\n\n def _create_id(self):\n raise NotImplementedError\n\n def create(self, data):\n obj = data\n obj['id'] = self._create_id()\n self._write(obj)\n return obj\n",
"<import token>\n\n\nclass BaseDAO(object):\n <docstring token>\n <function token>\n <function token>\n\n def get(self):\n return self.db\n\n def _create_id(self):\n raise NotImplementedError\n\n def create(self, data):\n obj = data\n obj['id'] = self._create_id()\n self._write(obj)\n return obj\n",
"<import token>\n\n\nclass BaseDAO(object):\n <docstring token>\n <function token>\n <function token>\n\n def get(self):\n return self.db\n\n def _create_id(self):\n raise NotImplementedError\n <function token>\n",
"<import token>\n\n\nclass BaseDAO(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def _create_id(self):\n raise NotImplementedError\n <function token>\n",
"<import token>\n\n\nclass BaseDAO(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,497 |
df3f7a8ec7c880b88ad8c2eb192a55fc7ac4943d
|
import json
from os import path
import airflow
from airflow import DAG
from airflow.exceptions import AirflowException
from airflow.hooks.S3_hook import S3Hook
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator
from dbnd import log_dataframe, log_metric, task
from dbnd._core.constants import DbndTargetOperationType
# Retrieve Airflow Variables
AWS_CONN_ID = Variable.get("AWS_s3_conn_id")
DAG_ID = Variable.get("s3_key_monitor_DAG_id")
S3_KEY_MONITOR_SCHEDULE = Variable.get("s3_key_monitor_schedule")
try:
TARGET_KEYS = Variable.get("s3_monitor_target_keys")
except Exception:  # fall back to None when the variable is not defined
TARGET_KEYS = None
try:
TARGET_PREFIXES = Variable.get("s3_monitor_target_prefixes")
except Exception:  # fall back to None when the variable is not defined
TARGET_PREFIXES = None
MB = 1048576  # conversion factor from bytes to MB
DEFAULT_ARGS = {
"owner": "databand",
"start_date": airflow.utils.dates.days_ago(0),
"provide_context": True,
}
dag = DAG(
dag_id=DAG_ID,
schedule_interval="{}".format(S3_KEY_MONITOR_SCHEDULE),
default_args=DEFAULT_ARGS,
tags=["s3", "dbnd_monitor"],
)
def parse_s3_uri(URIs):
"""parses S3 URIs, seperating out buckets and keys from URI"""
buckets, keys = [], []
for URI in URIs:
uri_path = path.normpath(URI).split("/")
buckets.append(uri_path[1])
keys.append(uri_path[2:])
return buckets, keys
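# Worked example (assumes POSIX os.path.normpath semantics):
#   parse_s3_uri(["s3://my-bucket/data/file.csv"])
# normpath collapses the "//" to give "s3:/my-bucket/data/file.csv", so the
# split yields buckets == ["my-bucket"] and keys == [["data", "file.csv"]].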
def monitor_S3_key(**context):
"""
    Logs the following metrics for the target key:
    - size (MB)
    - content type (MIME type)
- last modified timestamp
- metadata associated with the key
- parts count
- storage class
"""
s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)
target_path = context["target_s3_path"]
basename = context["path_basename"]
log_metric("target file", target_path)
boto3_key_object = s3_hook.get_key(key=target_path)
key_metrics = {
"{}-size(MB)".format(basename): (boto3_key_object.content_length / MB),
"{}-content_type".format(basename): boto3_key_object.content_type,
"{}-last_modified".format(basename): boto3_key_object.last_modified.__str__(),
"{}-metadata".format(basename): boto3_key_object.metadata,
"{}-parts_count".format(basename): boto3_key_object.parts_count,
}
key_metrics["{}-storage_class".format(basename)] = (
boto3_key_object.storage_class
if boto3_key_object.storage_class
else "s3 standard"
)
for metric_name, value in key_metrics.items():
log_metric(metric_name, value)
context["ti"].xcom_push("{}_key_metrics".format(basename), key_metrics)
def monitor_S3_prefix(**context):
"""
    Logs the following metrics for each target prefix:
    - total size of the prefix (MB)
    - mean key size within the prefix (MB)
    - largest key within the prefix (MB)
    - object count
"""
s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)
target_prefix = "/".join(context["prefix"])
log_metric("prefix", target_prefix)
target_basename = context["prefix_basename"]
log_metric("basename", target_basename)
target_bucket = context["bucket"]
log_metric("bucket", target_bucket)
bucket = s3_hook.get_bucket(target_bucket)
total_size, num_objs = 0, 0
obj_sizes, last_modified = [], []
    # track the largest size inside the loop rather than calling max() afterwards, avoiding a second pass over large prefixes
largest_key_size = 0
for obj in bucket.objects.filter(Prefix=target_prefix):
total_size += obj.size
num_objs += 1
obj_sizes.append(obj.size)
if obj.size >= largest_key_size:
largest_key_size = obj.size
        last_modified.append(str(obj.last_modified))
    mean_key_size = (total_size / num_objs) if num_objs else 0  # in bytes; guards against empty prefixes
prefix_metrics = {
"{}-total_size(MB)".format(target_basename): total_size / MB,
"{}-largest_key_size(MB)".format(target_basename): largest_key_size / MB,
"{}-mean_key_size(MB)".format(target_basename): mean_key_size / MB,
"{}-object_count".format(target_basename): num_objs,
}
for metric_name, metric_value in prefix_metrics.items():
log_metric(metric_name, metric_value)
context["ti"].xcom_push("{}_prefix_metrics".format(target_basename), prefix_metrics)
def aggregate_and_compare_metrics(**context):
"""
    Aggregates the per-target metrics and logs cross-target comparisons:
    - largest key and its size
    - largest prefix by total size
    - largest prefix by object count
"""
# use the basename to build the correct task_ids:
key_basenames = context["key_basenames"]
prefix_basenames = context["prefix_basenames"]
key_monitor_task_ids = ["{}_monitor".format(basename) for basename in key_basenames]
prefix_monitor_task_ids = [
"{}_monitor".format(basename) for basename in prefix_basenames
]
    # collect the per-key metric dicts pulled from XCom:
key_metrics_list = []
for basename, task_id in zip(key_basenames, key_monitor_task_ids):
key_metrics = context["ti"].xcom_pull(
task_ids=task_id, key="{}_key_metrics".format(basename)
)
if key_metrics:
key_metrics_list.append(key_metrics)
    # merge the per-target dicts, grouping values under each metric name
if key_metrics_list:
log_metric("key_metrics", key_metrics_list)
aggregated_key_metrics = {"targets": key_basenames}
for metrics in key_metrics_list:
for metric_name, metric_value in metrics.items():
metric_name = metric_name.split("-")[-1]
if metric_name in aggregated_key_metrics:
aggregated_key_metrics[metric_name].append(metric_value)
else:
aggregated_key_metrics[metric_name] = [metric_value]
        # log the largest key size metric
largest_key_size = max(aggregated_key_metrics["size(MB)"])
log_metric("largest_key_size(MB)", largest_key_size)
largest_key = aggregated_key_metrics["targets"][
aggregated_key_metrics["size(MB)"].index(largest_key_size)
]
log_metric("largest_key", largest_key)
    # repeat the process for the prefix monitors
prefix_metrics_list = []
log_metric("prefix task_ids", prefix_monitor_task_ids)
log_metric("prefix basenames", prefix_basenames)
for prefix_basename, task_id in zip(prefix_basenames, prefix_monitor_task_ids):
prefix_metrics = context["ti"].xcom_pull(
task_ids=task_id, key="{}_prefix_metrics".format(prefix_basename)
)
if prefix_metrics:
prefix_metrics_list.append(prefix_metrics)
if prefix_metrics_list:
log_metric("prefix_metrics", prefix_metrics_list)
aggregated_prefix_metrics = {"targets": prefix_basenames}
for metrics in prefix_metrics_list:
for metric_name, metric_value in metrics.items():
metric_name = metric_name.split("-")[-1]
if metric_name in aggregated_prefix_metrics:
aggregated_prefix_metrics[metric_name].append(metric_value)
else:
aggregated_prefix_metrics[metric_name] = [metric_value]
log_metric("aggregated prefix metrics", aggregated_prefix_metrics)
largest_prefix_by_mem = max(aggregated_prefix_metrics["total_size(MB)"])
largest_mem_prefix_name = aggregated_prefix_metrics["targets"][
aggregated_prefix_metrics["total_size(MB)"].index(largest_prefix_by_mem)
]
largest_prefix_by_obj_cnt = max(aggregated_prefix_metrics["object_count"])
largest_obj_cnt_prefix_name = aggregated_prefix_metrics["targets"][
aggregated_prefix_metrics["object_count"].index(largest_prefix_by_obj_cnt)
]
log_metric("largest_prefix_name", largest_mem_prefix_name)
log_metric("largest_prefix_size(MB)", largest_prefix_by_mem)
log_metric("largest_prefix_by_obj_count", largest_prefix_by_obj_cnt)
log_metric("largest_obj_cnt_prefix_name", largest_obj_cnt_prefix_name)
with dag as s3_bucket_template_dag:
AirflowTasks = []
prefix_basenames = []
    key_basenames = []
    target_URIs = []  # default so the aggregation task's kwargs are defined even when no target keys are configured
if TARGET_KEYS:
target_URIs = TARGET_KEYS.split(",")
target_buckets, target_keys = parse_s3_uri(target_URIs)
for URI, key in zip(target_URIs, target_keys):
basename = key[-1]
key_basenames.append(basename)
AirflowTasks.append(
PythonOperator(
task_id="{}_monitor".format(basename),
python_callable=monitor_S3_key,
op_kwargs={"target_s3_path": URI, "path_basename": basename},
)
)
if TARGET_PREFIXES:
target_prefix_paths = TARGET_PREFIXES.split(",")
bucket_names, prefixes = parse_s3_uri(target_prefix_paths)
for bucket_name, prefix in zip(bucket_names, prefixes):
basename = prefix[-1]
prefix_basenames.append(basename)
AirflowTasks.append(
PythonOperator(
task_id="{}_monitor".format(basename),
python_callable=monitor_S3_prefix,
op_kwargs={
"prefix": prefix,
"prefix_basename": basename,
"bucket": bucket_name,
},
)
)
compare_metrics_task = PythonOperator(
task_id="aggregate_and_compare_metrics",
python_callable=aggregate_and_compare_metrics,
op_kwargs={
"target_URIs": target_URIs,
"key_basenames": key_basenames,
"prefix_basenames": prefix_basenames,
},
)
    for airflow_task in AirflowTasks:  # `airflow_task` avoids shadowing the imported dbnd `task`
        airflow_task >> compare_metrics_task
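# Example setup (hypothetical values; CLI syntax shown is Airflow 2.x, while
# Airflow 1.10 uses `airflow variables --set KEY VALUE` instead):
#   airflow variables set AWS_s3_conn_id aws_default
#   airflow variables set s3_key_monitor_DAG_id s3_key_monitor
#   airflow variables set s3_key_monitor_schedule "@hourly"
#   airflow variables set s3_monitor_target_keys "s3://my-bucket/data/a.csv,s3://my-bucket/data/b.csv"
#   airflow variables set s3_monitor_target_prefixes "s3://my-bucket/logs/2021"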
|
[
"import json\n\nfrom os import path\n\nimport airflow\n\nfrom airflow import DAG\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.S3_hook import S3Hook\nfrom airflow.models import Variable\nfrom airflow.operators.python_operator import PythonOperator\nfrom dbnd import log_dataframe, log_metric, task\nfrom dbnd._core.constants import DbndTargetOperationType\n\n\n# Retreive Airflow Variables\nAWS_CONN_ID = Variable.get(\"AWS_s3_conn_id\")\nDAG_ID = Variable.get(\"s3_key_monitor_DAG_id\")\nS3_KEY_MONITOR_SCHEDULE = Variable.get(\"s3_key_monitor_schedule\")\ntry:\n TARGET_KEYS = Variable.get(\"s3_monitor_target_keys\")\nexcept:\n TARGET_KEYS = None\ntry:\n TARGET_PREFIXES = Variable.get(\"s3_monitor_target_prefixes\")\nexcept:\n TARGET_PREFIXES = None\n\nMB = 1048576 # conversion factor from Byte to MB\n\nDEFAULT_ARGS = {\n \"owner\": \"databand\",\n \"start_date\": airflow.utils.dates.days_ago(0),\n \"provide_context\": True,\n}\n\ndag = DAG(\n dag_id=DAG_ID,\n schedule_interval=\"{}\".format(S3_KEY_MONITOR_SCHEDULE),\n default_args=DEFAULT_ARGS,\n tags=[\"s3\", \"dbnd_monitor\"],\n)\n\n\ndef parse_s3_uri(URIs):\n \"\"\"parses S3 URIs, seperating out buckets and keys from URI\"\"\"\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split(\"/\")\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n\n return buckets, keys\n\n\ndef monitor_S3_key(**context):\n \"\"\"\n S3 monitor will log metrics for the target key, collecting the following metrics:\n - size (MB)\n - context type (MIME type)\n - last modified timestamp\n - metadata associated with the key\n - parts count\n - storage class\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n\n target_path = context[\"target_s3_path\"]\n basename = context[\"path_basename\"]\n log_metric(\"target file\", target_path)\n\n boto3_key_object = s3_hook.get_key(key=target_path)\n\n key_metrics = {\n \"{}-size(MB)\".format(basename): (boto3_key_object.content_length / MB),\n \"{}-content_type\".format(basename): boto3_key_object.content_type,\n \"{}-last_modified\".format(basename): boto3_key_object.last_modified.__str__(),\n \"{}-metadata\".format(basename): boto3_key_object.metadata,\n \"{}-parts_count\".format(basename): boto3_key_object.parts_count,\n }\n\n key_metrics[\"{}-storage_class\".format(basename)] = (\n boto3_key_object.storage_class\n if boto3_key_object.storage_class\n else \"s3 standard\"\n )\n\n for metric_name, value in key_metrics.items():\n log_metric(metric_name, value)\n\n context[\"ti\"].xcom_push(\"{}_key_metrics\".format(basename), key_metrics)\n\n\ndef monitor_S3_prefix(**context):\n \"\"\"\n S3 monitor will monitor for the target prefix(s), collecting the following metrics:\n - total size of prefix (MB)\n - mean key size of prefix (MB)\n - largest key with prefix (MB)\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_prefix = \"/\".join(context[\"prefix\"])\n log_metric(\"prefix\", target_prefix)\n\n target_basename = context[\"prefix_basename\"]\n log_metric(\"basename\", target_basename)\n\n target_bucket = context[\"bucket\"]\n log_metric(\"bucket\", target_bucket)\n\n bucket = s3_hook.get_bucket(target_bucket)\n\n total_size, num_objs = 0, 0\n obj_sizes, last_modified = [], []\n\n # find largest size in loop instead of using max() for optimization on large prefixes\n largest_key_size = 0\n for obj in bucket.objects.filter(Prefix=target_prefix):\n total_size += obj.size\n num_objs += 1\n obj_sizes.append(obj.size)\n if obj.size >= largest_key_size:\n 
largest_key_size = obj.size\n last_modified.append(obj.last_modified.__str__())\n\n mean_key_size = (total_size / num_objs) / MB\n\n prefix_metrics = {\n \"{}-total_size(MB)\".format(target_basename): total_size / MB,\n \"{}-largest_key_size(MB)\".format(target_basename): largest_key_size / MB,\n \"{}-mean_key_size(MB)\".format(target_basename): mean_key_size / MB,\n \"{}-object_count\".format(target_basename): num_objs,\n }\n\n for metric_name, metric_value in prefix_metrics.items():\n log_metric(metric_name, metric_value)\n\n context[\"ti\"].xcom_push(\"{}_prefix_metrics\".format(target_basename), prefix_metrics)\n\n\ndef aggregate_and_compare_metrics(**context):\n \"\"\"\n Aggregate and compare metrics. This task will log the following metrics:\n - largest key\n - largest key size\n - largest prefix size\n - total size of each prefix monitored\n \"\"\"\n # use the basename to build the correct task_ids:\n key_basenames = context[\"key_basenames\"]\n prefix_basenames = context[\"prefix_basenames\"]\n key_monitor_task_ids = [\"{}_monitor\".format(basename) for basename in key_basenames]\n prefix_monitor_task_ids = [\n \"{}_monitor\".format(basename) for basename in prefix_basenames\n ]\n\n # make a list of metrics hash tables pulled from xcom:\n key_metrics_list = []\n for basename, task_id in zip(key_basenames, key_monitor_task_ids):\n key_metrics = context[\"ti\"].xcom_pull(\n task_ids=task_id, key=\"{}_key_metrics\".format(basename)\n )\n if key_metrics:\n key_metrics_list.append(key_metrics)\n\n # join the hash tables by targets, place into appropriate keys\n if key_metrics_list:\n log_metric(\"key_metrics\", key_metrics_list)\n aggregated_key_metrics = {\"targets\": key_basenames}\n for metrics in key_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split(\"-\")[-1]\n if metric_name in aggregated_key_metrics:\n aggregated_key_metrics[metric_name].append(metric_value)\n else:\n aggregated_key_metrics[metric_name] = [metric_value]\n\n # log largest file size metric\n largest_key_size = max(aggregated_key_metrics[\"size(MB)\"])\n log_metric(\"largest_key_size(MB)\", largest_key_size)\n largest_key = aggregated_key_metrics[\"targets\"][\n aggregated_key_metrics[\"size(MB)\"].index(largest_key_size)\n ]\n log_metric(\"largest_key\", largest_key)\n\n # repreat the process for prefix monitor\n prefix_metrics_list = []\n log_metric(\"prefix task_ids\", prefix_monitor_task_ids)\n log_metric(\"prefix basenames\", prefix_basenames)\n for prefix_basename, task_id in zip(prefix_basenames, prefix_monitor_task_ids):\n prefix_metrics = context[\"ti\"].xcom_pull(\n task_ids=task_id, key=\"{}_prefix_metrics\".format(prefix_basename)\n )\n if prefix_metrics:\n prefix_metrics_list.append(prefix_metrics)\n\n if prefix_metrics_list:\n log_metric(\"prefix_metrics\", prefix_metrics_list)\n aggregated_prefix_metrics = {\"targets\": prefix_basenames}\n for metrics in prefix_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split(\"-\")[-1]\n if metric_name in aggregated_prefix_metrics:\n aggregated_prefix_metrics[metric_name].append(metric_value)\n else:\n aggregated_prefix_metrics[metric_name] = [metric_value]\n log_metric(\"aggregated prefix metrics\", aggregated_prefix_metrics)\n\n largest_prefix_by_mem = max(aggregated_prefix_metrics[\"total_size(MB)\"])\n largest_mem_prefix_name = aggregated_prefix_metrics[\"targets\"][\n aggregated_prefix_metrics[\"total_size(MB)\"].index(largest_prefix_by_mem)\n ]\n 
largest_prefix_by_obj_cnt = max(aggregated_prefix_metrics[\"object_count\"])\n largest_obj_cnt_prefix_name = aggregated_prefix_metrics[\"targets\"][\n aggregated_prefix_metrics[\"object_count\"].index(largest_prefix_by_obj_cnt)\n ]\n log_metric(\"largest_prefix_name\", largest_mem_prefix_name)\n log_metric(\"largest_prefix_size(MB)\", largest_prefix_by_mem)\n log_metric(\"largest_prefix_by_obj_count\", largest_prefix_by_obj_cnt)\n log_metric(\"largest_obj_cnt_prefix_name\", largest_obj_cnt_prefix_name)\n\n\nwith dag as s3_bucket_template_dag:\n\n AirflowTasks = []\n prefix_basenames = []\n key_basenames = []\n if TARGET_KEYS:\n target_URIs = TARGET_KEYS.split(\",\")\n target_buckets, target_keys = parse_s3_uri(target_URIs)\n\n for URI, key in zip(target_URIs, target_keys):\n basename = key[-1]\n key_basenames.append(basename)\n\n AirflowTasks.append(\n PythonOperator(\n task_id=\"{}_monitor\".format(basename),\n python_callable=monitor_S3_key,\n op_kwargs={\"target_s3_path\": URI, \"path_basename\": basename},\n )\n )\n\n if TARGET_PREFIXES:\n target_prefix_paths = TARGET_PREFIXES.split(\",\")\n bucket_names, prefixes = parse_s3_uri(target_prefix_paths)\n\n for bucket_name, prefix in zip(bucket_names, prefixes):\n basename = prefix[-1]\n prefix_basenames.append(basename)\n\n AirflowTasks.append(\n PythonOperator(\n task_id=\"{}_monitor\".format(basename),\n python_callable=monitor_S3_prefix,\n op_kwargs={\n \"prefix\": prefix,\n \"prefix_basename\": basename,\n \"bucket\": bucket_name,\n },\n )\n )\n\n compare_metrics_task = PythonOperator(\n task_id=\"aggregate_and_compare_metrics\",\n python_callable=aggregate_and_compare_metrics,\n op_kwargs={\n \"target_URIs\": target_URIs,\n \"key_basenames\": key_basenames,\n \"prefix_basenames\": prefix_basenames,\n },\n )\n\n for task in AirflowTasks:\n task >> compare_metrics_task\n",
"import json\nfrom os import path\nimport airflow\nfrom airflow import DAG\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.S3_hook import S3Hook\nfrom airflow.models import Variable\nfrom airflow.operators.python_operator import PythonOperator\nfrom dbnd import log_dataframe, log_metric, task\nfrom dbnd._core.constants import DbndTargetOperationType\nAWS_CONN_ID = Variable.get('AWS_s3_conn_id')\nDAG_ID = Variable.get('s3_key_monitor_DAG_id')\nS3_KEY_MONITOR_SCHEDULE = Variable.get('s3_key_monitor_schedule')\ntry:\n TARGET_KEYS = Variable.get('s3_monitor_target_keys')\nexcept:\n TARGET_KEYS = None\ntry:\n TARGET_PREFIXES = Variable.get('s3_monitor_target_prefixes')\nexcept:\n TARGET_PREFIXES = None\nMB = 1048576\nDEFAULT_ARGS = {'owner': 'databand', 'start_date': airflow.utils.dates.\n days_ago(0), 'provide_context': True}\ndag = DAG(dag_id=DAG_ID, schedule_interval='{}'.format(\n S3_KEY_MONITOR_SCHEDULE), default_args=DEFAULT_ARGS, tags=['s3',\n 'dbnd_monitor'])\n\n\ndef parse_s3_uri(URIs):\n \"\"\"parses S3 URIs, seperating out buckets and keys from URI\"\"\"\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split('/')\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n return buckets, keys\n\n\ndef monitor_S3_key(**context):\n \"\"\"\n S3 monitor will log metrics for the target key, collecting the following metrics:\n - size (MB)\n - context type (MIME type)\n - last modified timestamp\n - metadata associated with the key\n - parts count\n - storage class\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_path = context['target_s3_path']\n basename = context['path_basename']\n log_metric('target file', target_path)\n boto3_key_object = s3_hook.get_key(key=target_path)\n key_metrics = {'{}-size(MB)'.format(basename): boto3_key_object.\n content_length / MB, '{}-content_type'.format(basename):\n boto3_key_object.content_type, '{}-last_modified'.format(basename):\n boto3_key_object.last_modified.__str__(), '{}-metadata'.format(\n basename): boto3_key_object.metadata, '{}-parts_count'.format(\n basename): boto3_key_object.parts_count}\n key_metrics['{}-storage_class'.format(basename)] = (boto3_key_object.\n storage_class if boto3_key_object.storage_class else 's3 standard')\n for metric_name, value in key_metrics.items():\n log_metric(metric_name, value)\n context['ti'].xcom_push('{}_key_metrics'.format(basename), key_metrics)\n\n\ndef monitor_S3_prefix(**context):\n \"\"\"\n S3 monitor will monitor for the target prefix(s), collecting the following metrics:\n - total size of prefix (MB)\n - mean key size of prefix (MB)\n - largest key with prefix (MB)\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_prefix = '/'.join(context['prefix'])\n log_metric('prefix', target_prefix)\n target_basename = context['prefix_basename']\n log_metric('basename', target_basename)\n target_bucket = context['bucket']\n log_metric('bucket', target_bucket)\n bucket = s3_hook.get_bucket(target_bucket)\n total_size, num_objs = 0, 0\n obj_sizes, last_modified = [], []\n largest_key_size = 0\n for obj in bucket.objects.filter(Prefix=target_prefix):\n total_size += obj.size\n num_objs += 1\n obj_sizes.append(obj.size)\n if obj.size >= largest_key_size:\n largest_key_size = obj.size\n last_modified.append(obj.last_modified.__str__())\n mean_key_size = total_size / num_objs / MB\n prefix_metrics = {'{}-total_size(MB)'.format(target_basename): \n total_size / MB, '{}-largest_key_size(MB)'.format(target_basename):\n largest_key_size / MB, 
'{}-mean_key_size(MB)'.format(\n target_basename): mean_key_size / MB, '{}-object_count'.format(\n target_basename): num_objs}\n for metric_name, metric_value in prefix_metrics.items():\n log_metric(metric_name, metric_value)\n context['ti'].xcom_push('{}_prefix_metrics'.format(target_basename),\n prefix_metrics)\n\n\ndef aggregate_and_compare_metrics(**context):\n \"\"\"\n Aggregate and compare metrics. This task will log the following metrics:\n - largest key\n - largest key size\n - largest prefix size\n - total size of each prefix monitored\n \"\"\"\n key_basenames = context['key_basenames']\n prefix_basenames = context['prefix_basenames']\n key_monitor_task_ids = ['{}_monitor'.format(basename) for basename in\n key_basenames]\n prefix_monitor_task_ids = ['{}_monitor'.format(basename) for basename in\n prefix_basenames]\n key_metrics_list = []\n for basename, task_id in zip(key_basenames, key_monitor_task_ids):\n key_metrics = context['ti'].xcom_pull(task_ids=task_id, key=\n '{}_key_metrics'.format(basename))\n if key_metrics:\n key_metrics_list.append(key_metrics)\n if key_metrics_list:\n log_metric('key_metrics', key_metrics_list)\n aggregated_key_metrics = {'targets': key_basenames}\n for metrics in key_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split('-')[-1]\n if metric_name in aggregated_key_metrics:\n aggregated_key_metrics[metric_name].append(metric_value)\n else:\n aggregated_key_metrics[metric_name] = [metric_value]\n largest_key_size = max(aggregated_key_metrics['size(MB)'])\n log_metric('largest_key_size(MB)', largest_key_size)\n largest_key = aggregated_key_metrics['targets'][aggregated_key_metrics\n ['size(MB)'].index(largest_key_size)]\n log_metric('largest_key', largest_key)\n prefix_metrics_list = []\n log_metric('prefix task_ids', prefix_monitor_task_ids)\n log_metric('prefix basenames', prefix_basenames)\n for prefix_basename, task_id in zip(prefix_basenames,\n prefix_monitor_task_ids):\n prefix_metrics = context['ti'].xcom_pull(task_ids=task_id, key=\n '{}_prefix_metrics'.format(prefix_basename))\n if prefix_metrics:\n prefix_metrics_list.append(prefix_metrics)\n if prefix_metrics_list:\n log_metric('prefix_metrics', prefix_metrics_list)\n aggregated_prefix_metrics = {'targets': prefix_basenames}\n for metrics in prefix_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split('-')[-1]\n if metric_name in aggregated_prefix_metrics:\n aggregated_prefix_metrics[metric_name].append(metric_value)\n else:\n aggregated_prefix_metrics[metric_name] = [metric_value]\n log_metric('aggregated prefix metrics', aggregated_prefix_metrics)\n largest_prefix_by_mem = max(aggregated_prefix_metrics['total_size(MB)']\n )\n largest_mem_prefix_name = aggregated_prefix_metrics['targets'][\n aggregated_prefix_metrics['total_size(MB)'].index(\n largest_prefix_by_mem)]\n largest_prefix_by_obj_cnt = max(aggregated_prefix_metrics[\n 'object_count'])\n largest_obj_cnt_prefix_name = aggregated_prefix_metrics['targets'][\n aggregated_prefix_metrics['object_count'].index(\n largest_prefix_by_obj_cnt)]\n log_metric('largest_prefix_name', largest_mem_prefix_name)\n log_metric('largest_prefix_size(MB)', largest_prefix_by_mem)\n log_metric('largest_prefix_by_obj_count', largest_prefix_by_obj_cnt)\n log_metric('largest_obj_cnt_prefix_name', largest_obj_cnt_prefix_name)\n\n\nwith dag as s3_bucket_template_dag:\n AirflowTasks = []\n prefix_basenames = []\n key_basenames = []\n if TARGET_KEYS:\n target_URIs = 
TARGET_KEYS.split(',')\n target_buckets, target_keys = parse_s3_uri(target_URIs)\n for URI, key in zip(target_URIs, target_keys):\n basename = key[-1]\n key_basenames.append(basename)\n AirflowTasks.append(PythonOperator(task_id='{}_monitor'.format(\n basename), python_callable=monitor_S3_key, op_kwargs={\n 'target_s3_path': URI, 'path_basename': basename}))\n if TARGET_PREFIXES:\n target_prefix_paths = TARGET_PREFIXES.split(',')\n bucket_names, prefixes = parse_s3_uri(target_prefix_paths)\n for bucket_name, prefix in zip(bucket_names, prefixes):\n basename = prefix[-1]\n prefix_basenames.append(basename)\n AirflowTasks.append(PythonOperator(task_id='{}_monitor'.format(\n basename), python_callable=monitor_S3_prefix, op_kwargs={\n 'prefix': prefix, 'prefix_basename': basename, 'bucket':\n bucket_name}))\n compare_metrics_task = PythonOperator(task_id=\n 'aggregate_and_compare_metrics', python_callable=\n aggregate_and_compare_metrics, op_kwargs={'target_URIs':\n target_URIs, 'key_basenames': key_basenames, 'prefix_basenames':\n prefix_basenames})\n for task in AirflowTasks:\n task >> compare_metrics_task\n",
"<import token>\nAWS_CONN_ID = Variable.get('AWS_s3_conn_id')\nDAG_ID = Variable.get('s3_key_monitor_DAG_id')\nS3_KEY_MONITOR_SCHEDULE = Variable.get('s3_key_monitor_schedule')\ntry:\n TARGET_KEYS = Variable.get('s3_monitor_target_keys')\nexcept:\n TARGET_KEYS = None\ntry:\n TARGET_PREFIXES = Variable.get('s3_monitor_target_prefixes')\nexcept:\n TARGET_PREFIXES = None\nMB = 1048576\nDEFAULT_ARGS = {'owner': 'databand', 'start_date': airflow.utils.dates.\n days_ago(0), 'provide_context': True}\ndag = DAG(dag_id=DAG_ID, schedule_interval='{}'.format(\n S3_KEY_MONITOR_SCHEDULE), default_args=DEFAULT_ARGS, tags=['s3',\n 'dbnd_monitor'])\n\n\ndef parse_s3_uri(URIs):\n \"\"\"parses S3 URIs, seperating out buckets and keys from URI\"\"\"\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split('/')\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n return buckets, keys\n\n\ndef monitor_S3_key(**context):\n \"\"\"\n S3 monitor will log metrics for the target key, collecting the following metrics:\n - size (MB)\n - context type (MIME type)\n - last modified timestamp\n - metadata associated with the key\n - parts count\n - storage class\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_path = context['target_s3_path']\n basename = context['path_basename']\n log_metric('target file', target_path)\n boto3_key_object = s3_hook.get_key(key=target_path)\n key_metrics = {'{}-size(MB)'.format(basename): boto3_key_object.\n content_length / MB, '{}-content_type'.format(basename):\n boto3_key_object.content_type, '{}-last_modified'.format(basename):\n boto3_key_object.last_modified.__str__(), '{}-metadata'.format(\n basename): boto3_key_object.metadata, '{}-parts_count'.format(\n basename): boto3_key_object.parts_count}\n key_metrics['{}-storage_class'.format(basename)] = (boto3_key_object.\n storage_class if boto3_key_object.storage_class else 's3 standard')\n for metric_name, value in key_metrics.items():\n log_metric(metric_name, value)\n context['ti'].xcom_push('{}_key_metrics'.format(basename), key_metrics)\n\n\ndef monitor_S3_prefix(**context):\n \"\"\"\n S3 monitor will monitor for the target prefix(s), collecting the following metrics:\n - total size of prefix (MB)\n - mean key size of prefix (MB)\n - largest key with prefix (MB)\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_prefix = '/'.join(context['prefix'])\n log_metric('prefix', target_prefix)\n target_basename = context['prefix_basename']\n log_metric('basename', target_basename)\n target_bucket = context['bucket']\n log_metric('bucket', target_bucket)\n bucket = s3_hook.get_bucket(target_bucket)\n total_size, num_objs = 0, 0\n obj_sizes, last_modified = [], []\n largest_key_size = 0\n for obj in bucket.objects.filter(Prefix=target_prefix):\n total_size += obj.size\n num_objs += 1\n obj_sizes.append(obj.size)\n if obj.size >= largest_key_size:\n largest_key_size = obj.size\n last_modified.append(obj.last_modified.__str__())\n mean_key_size = total_size / num_objs / MB\n prefix_metrics = {'{}-total_size(MB)'.format(target_basename): \n total_size / MB, '{}-largest_key_size(MB)'.format(target_basename):\n largest_key_size / MB, '{}-mean_key_size(MB)'.format(\n target_basename): mean_key_size / MB, '{}-object_count'.format(\n target_basename): num_objs}\n for metric_name, metric_value in prefix_metrics.items():\n log_metric(metric_name, metric_value)\n context['ti'].xcom_push('{}_prefix_metrics'.format(target_basename),\n prefix_metrics)\n\n\ndef 
aggregate_and_compare_metrics(**context):\n \"\"\"\n Aggregate and compare metrics. This task will log the following metrics:\n - largest key\n - largest key size\n - largest prefix size\n - total size of each prefix monitored\n \"\"\"\n key_basenames = context['key_basenames']\n prefix_basenames = context['prefix_basenames']\n key_monitor_task_ids = ['{}_monitor'.format(basename) for basename in\n key_basenames]\n prefix_monitor_task_ids = ['{}_monitor'.format(basename) for basename in\n prefix_basenames]\n key_metrics_list = []\n for basename, task_id in zip(key_basenames, key_monitor_task_ids):\n key_metrics = context['ti'].xcom_pull(task_ids=task_id, key=\n '{}_key_metrics'.format(basename))\n if key_metrics:\n key_metrics_list.append(key_metrics)\n if key_metrics_list:\n log_metric('key_metrics', key_metrics_list)\n aggregated_key_metrics = {'targets': key_basenames}\n for metrics in key_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split('-')[-1]\n if metric_name in aggregated_key_metrics:\n aggregated_key_metrics[metric_name].append(metric_value)\n else:\n aggregated_key_metrics[metric_name] = [metric_value]\n largest_key_size = max(aggregated_key_metrics['size(MB)'])\n log_metric('largest_key_size(MB)', largest_key_size)\n largest_key = aggregated_key_metrics['targets'][aggregated_key_metrics\n ['size(MB)'].index(largest_key_size)]\n log_metric('largest_key', largest_key)\n prefix_metrics_list = []\n log_metric('prefix task_ids', prefix_monitor_task_ids)\n log_metric('prefix basenames', prefix_basenames)\n for prefix_basename, task_id in zip(prefix_basenames,\n prefix_monitor_task_ids):\n prefix_metrics = context['ti'].xcom_pull(task_ids=task_id, key=\n '{}_prefix_metrics'.format(prefix_basename))\n if prefix_metrics:\n prefix_metrics_list.append(prefix_metrics)\n if prefix_metrics_list:\n log_metric('prefix_metrics', prefix_metrics_list)\n aggregated_prefix_metrics = {'targets': prefix_basenames}\n for metrics in prefix_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split('-')[-1]\n if metric_name in aggregated_prefix_metrics:\n aggregated_prefix_metrics[metric_name].append(metric_value)\n else:\n aggregated_prefix_metrics[metric_name] = [metric_value]\n log_metric('aggregated prefix metrics', aggregated_prefix_metrics)\n largest_prefix_by_mem = max(aggregated_prefix_metrics['total_size(MB)']\n )\n largest_mem_prefix_name = aggregated_prefix_metrics['targets'][\n aggregated_prefix_metrics['total_size(MB)'].index(\n largest_prefix_by_mem)]\n largest_prefix_by_obj_cnt = max(aggregated_prefix_metrics[\n 'object_count'])\n largest_obj_cnt_prefix_name = aggregated_prefix_metrics['targets'][\n aggregated_prefix_metrics['object_count'].index(\n largest_prefix_by_obj_cnt)]\n log_metric('largest_prefix_name', largest_mem_prefix_name)\n log_metric('largest_prefix_size(MB)', largest_prefix_by_mem)\n log_metric('largest_prefix_by_obj_count', largest_prefix_by_obj_cnt)\n log_metric('largest_obj_cnt_prefix_name', largest_obj_cnt_prefix_name)\n\n\nwith dag as s3_bucket_template_dag:\n AirflowTasks = []\n prefix_basenames = []\n key_basenames = []\n if TARGET_KEYS:\n target_URIs = TARGET_KEYS.split(',')\n target_buckets, target_keys = parse_s3_uri(target_URIs)\n for URI, key in zip(target_URIs, target_keys):\n basename = key[-1]\n key_basenames.append(basename)\n AirflowTasks.append(PythonOperator(task_id='{}_monitor'.format(\n basename), python_callable=monitor_S3_key, op_kwargs={\n 'target_s3_path': 
URI, 'path_basename': basename}))\n if TARGET_PREFIXES:\n target_prefix_paths = TARGET_PREFIXES.split(',')\n bucket_names, prefixes = parse_s3_uri(target_prefix_paths)\n for bucket_name, prefix in zip(bucket_names, prefixes):\n basename = prefix[-1]\n prefix_basenames.append(basename)\n AirflowTasks.append(PythonOperator(task_id='{}_monitor'.format(\n basename), python_callable=monitor_S3_prefix, op_kwargs={\n 'prefix': prefix, 'prefix_basename': basename, 'bucket':\n bucket_name}))\n compare_metrics_task = PythonOperator(task_id=\n 'aggregate_and_compare_metrics', python_callable=\n aggregate_and_compare_metrics, op_kwargs={'target_URIs':\n target_URIs, 'key_basenames': key_basenames, 'prefix_basenames':\n prefix_basenames})\n for task in AirflowTasks:\n task >> compare_metrics_task\n",
"<import token>\n<assignment token>\ntry:\n TARGET_KEYS = Variable.get('s3_monitor_target_keys')\nexcept:\n TARGET_KEYS = None\ntry:\n TARGET_PREFIXES = Variable.get('s3_monitor_target_prefixes')\nexcept:\n TARGET_PREFIXES = None\n<assignment token>\n\n\ndef parse_s3_uri(URIs):\n \"\"\"parses S3 URIs, seperating out buckets and keys from URI\"\"\"\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split('/')\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n return buckets, keys\n\n\ndef monitor_S3_key(**context):\n \"\"\"\n S3 monitor will log metrics for the target key, collecting the following metrics:\n - size (MB)\n - context type (MIME type)\n - last modified timestamp\n - metadata associated with the key\n - parts count\n - storage class\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_path = context['target_s3_path']\n basename = context['path_basename']\n log_metric('target file', target_path)\n boto3_key_object = s3_hook.get_key(key=target_path)\n key_metrics = {'{}-size(MB)'.format(basename): boto3_key_object.\n content_length / MB, '{}-content_type'.format(basename):\n boto3_key_object.content_type, '{}-last_modified'.format(basename):\n boto3_key_object.last_modified.__str__(), '{}-metadata'.format(\n basename): boto3_key_object.metadata, '{}-parts_count'.format(\n basename): boto3_key_object.parts_count}\n key_metrics['{}-storage_class'.format(basename)] = (boto3_key_object.\n storage_class if boto3_key_object.storage_class else 's3 standard')\n for metric_name, value in key_metrics.items():\n log_metric(metric_name, value)\n context['ti'].xcom_push('{}_key_metrics'.format(basename), key_metrics)\n\n\ndef monitor_S3_prefix(**context):\n \"\"\"\n S3 monitor will monitor for the target prefix(s), collecting the following metrics:\n - total size of prefix (MB)\n - mean key size of prefix (MB)\n - largest key with prefix (MB)\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_prefix = '/'.join(context['prefix'])\n log_metric('prefix', target_prefix)\n target_basename = context['prefix_basename']\n log_metric('basename', target_basename)\n target_bucket = context['bucket']\n log_metric('bucket', target_bucket)\n bucket = s3_hook.get_bucket(target_bucket)\n total_size, num_objs = 0, 0\n obj_sizes, last_modified = [], []\n largest_key_size = 0\n for obj in bucket.objects.filter(Prefix=target_prefix):\n total_size += obj.size\n num_objs += 1\n obj_sizes.append(obj.size)\n if obj.size >= largest_key_size:\n largest_key_size = obj.size\n last_modified.append(obj.last_modified.__str__())\n mean_key_size = total_size / num_objs / MB\n prefix_metrics = {'{}-total_size(MB)'.format(target_basename): \n total_size / MB, '{}-largest_key_size(MB)'.format(target_basename):\n largest_key_size / MB, '{}-mean_key_size(MB)'.format(\n target_basename): mean_key_size / MB, '{}-object_count'.format(\n target_basename): num_objs}\n for metric_name, metric_value in prefix_metrics.items():\n log_metric(metric_name, metric_value)\n context['ti'].xcom_push('{}_prefix_metrics'.format(target_basename),\n prefix_metrics)\n\n\ndef aggregate_and_compare_metrics(**context):\n \"\"\"\n Aggregate and compare metrics. 
This task will log the following metrics:\n - largest key\n - largest key size\n - largest prefix size\n - total size of each prefix monitored\n \"\"\"\n key_basenames = context['key_basenames']\n prefix_basenames = context['prefix_basenames']\n key_monitor_task_ids = ['{}_monitor'.format(basename) for basename in\n key_basenames]\n prefix_monitor_task_ids = ['{}_monitor'.format(basename) for basename in\n prefix_basenames]\n key_metrics_list = []\n for basename, task_id in zip(key_basenames, key_monitor_task_ids):\n key_metrics = context['ti'].xcom_pull(task_ids=task_id, key=\n '{}_key_metrics'.format(basename))\n if key_metrics:\n key_metrics_list.append(key_metrics)\n if key_metrics_list:\n log_metric('key_metrics', key_metrics_list)\n aggregated_key_metrics = {'targets': key_basenames}\n for metrics in key_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split('-')[-1]\n if metric_name in aggregated_key_metrics:\n aggregated_key_metrics[metric_name].append(metric_value)\n else:\n aggregated_key_metrics[metric_name] = [metric_value]\n largest_key_size = max(aggregated_key_metrics['size(MB)'])\n log_metric('largest_key_size(MB)', largest_key_size)\n largest_key = aggregated_key_metrics['targets'][aggregated_key_metrics\n ['size(MB)'].index(largest_key_size)]\n log_metric('largest_key', largest_key)\n prefix_metrics_list = []\n log_metric('prefix task_ids', prefix_monitor_task_ids)\n log_metric('prefix basenames', prefix_basenames)\n for prefix_basename, task_id in zip(prefix_basenames,\n prefix_monitor_task_ids):\n prefix_metrics = context['ti'].xcom_pull(task_ids=task_id, key=\n '{}_prefix_metrics'.format(prefix_basename))\n if prefix_metrics:\n prefix_metrics_list.append(prefix_metrics)\n if prefix_metrics_list:\n log_metric('prefix_metrics', prefix_metrics_list)\n aggregated_prefix_metrics = {'targets': prefix_basenames}\n for metrics in prefix_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split('-')[-1]\n if metric_name in aggregated_prefix_metrics:\n aggregated_prefix_metrics[metric_name].append(metric_value)\n else:\n aggregated_prefix_metrics[metric_name] = [metric_value]\n log_metric('aggregated prefix metrics', aggregated_prefix_metrics)\n largest_prefix_by_mem = max(aggregated_prefix_metrics['total_size(MB)']\n )\n largest_mem_prefix_name = aggregated_prefix_metrics['targets'][\n aggregated_prefix_metrics['total_size(MB)'].index(\n largest_prefix_by_mem)]\n largest_prefix_by_obj_cnt = max(aggregated_prefix_metrics[\n 'object_count'])\n largest_obj_cnt_prefix_name = aggregated_prefix_metrics['targets'][\n aggregated_prefix_metrics['object_count'].index(\n largest_prefix_by_obj_cnt)]\n log_metric('largest_prefix_name', largest_mem_prefix_name)\n log_metric('largest_prefix_size(MB)', largest_prefix_by_mem)\n log_metric('largest_prefix_by_obj_count', largest_prefix_by_obj_cnt)\n log_metric('largest_obj_cnt_prefix_name', largest_obj_cnt_prefix_name)\n\n\nwith dag as s3_bucket_template_dag:\n AirflowTasks = []\n prefix_basenames = []\n key_basenames = []\n if TARGET_KEYS:\n target_URIs = TARGET_KEYS.split(',')\n target_buckets, target_keys = parse_s3_uri(target_URIs)\n for URI, key in zip(target_URIs, target_keys):\n basename = key[-1]\n key_basenames.append(basename)\n AirflowTasks.append(PythonOperator(task_id='{}_monitor'.format(\n basename), python_callable=monitor_S3_key, op_kwargs={\n 'target_s3_path': URI, 'path_basename': basename}))\n if TARGET_PREFIXES:\n target_prefix_paths = 
TARGET_PREFIXES.split(',')\n bucket_names, prefixes = parse_s3_uri(target_prefix_paths)\n for bucket_name, prefix in zip(bucket_names, prefixes):\n basename = prefix[-1]\n prefix_basenames.append(basename)\n AirflowTasks.append(PythonOperator(task_id='{}_monitor'.format(\n basename), python_callable=monitor_S3_prefix, op_kwargs={\n 'prefix': prefix, 'prefix_basename': basename, 'bucket':\n bucket_name}))\n compare_metrics_task = PythonOperator(task_id=\n 'aggregate_and_compare_metrics', python_callable=\n aggregate_and_compare_metrics, op_kwargs={'target_URIs':\n target_URIs, 'key_basenames': key_basenames, 'prefix_basenames':\n prefix_basenames})\n for task in AirflowTasks:\n task >> compare_metrics_task\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef parse_s3_uri(URIs):\n \"\"\"parses S3 URIs, seperating out buckets and keys from URI\"\"\"\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split('/')\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n return buckets, keys\n\n\ndef monitor_S3_key(**context):\n \"\"\"\n S3 monitor will log metrics for the target key, collecting the following metrics:\n - size (MB)\n - context type (MIME type)\n - last modified timestamp\n - metadata associated with the key\n - parts count\n - storage class\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_path = context['target_s3_path']\n basename = context['path_basename']\n log_metric('target file', target_path)\n boto3_key_object = s3_hook.get_key(key=target_path)\n key_metrics = {'{}-size(MB)'.format(basename): boto3_key_object.\n content_length / MB, '{}-content_type'.format(basename):\n boto3_key_object.content_type, '{}-last_modified'.format(basename):\n boto3_key_object.last_modified.__str__(), '{}-metadata'.format(\n basename): boto3_key_object.metadata, '{}-parts_count'.format(\n basename): boto3_key_object.parts_count}\n key_metrics['{}-storage_class'.format(basename)] = (boto3_key_object.\n storage_class if boto3_key_object.storage_class else 's3 standard')\n for metric_name, value in key_metrics.items():\n log_metric(metric_name, value)\n context['ti'].xcom_push('{}_key_metrics'.format(basename), key_metrics)\n\n\ndef monitor_S3_prefix(**context):\n \"\"\"\n S3 monitor will monitor for the target prefix(s), collecting the following metrics:\n - total size of prefix (MB)\n - mean key size of prefix (MB)\n - largest key with prefix (MB)\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_prefix = '/'.join(context['prefix'])\n log_metric('prefix', target_prefix)\n target_basename = context['prefix_basename']\n log_metric('basename', target_basename)\n target_bucket = context['bucket']\n log_metric('bucket', target_bucket)\n bucket = s3_hook.get_bucket(target_bucket)\n total_size, num_objs = 0, 0\n obj_sizes, last_modified = [], []\n largest_key_size = 0\n for obj in bucket.objects.filter(Prefix=target_prefix):\n total_size += obj.size\n num_objs += 1\n obj_sizes.append(obj.size)\n if obj.size >= largest_key_size:\n largest_key_size = obj.size\n last_modified.append(obj.last_modified.__str__())\n mean_key_size = total_size / num_objs / MB\n prefix_metrics = {'{}-total_size(MB)'.format(target_basename): \n total_size / MB, '{}-largest_key_size(MB)'.format(target_basename):\n largest_key_size / MB, '{}-mean_key_size(MB)'.format(\n target_basename): mean_key_size / MB, '{}-object_count'.format(\n target_basename): num_objs}\n for metric_name, metric_value in prefix_metrics.items():\n log_metric(metric_name, metric_value)\n context['ti'].xcom_push('{}_prefix_metrics'.format(target_basename),\n prefix_metrics)\n\n\ndef aggregate_and_compare_metrics(**context):\n \"\"\"\n Aggregate and compare metrics. 
This task will log the following metrics:\n - largest key\n - largest key size\n - largest prefix size\n - total size of each prefix monitored\n \"\"\"\n key_basenames = context['key_basenames']\n prefix_basenames = context['prefix_basenames']\n key_monitor_task_ids = ['{}_monitor'.format(basename) for basename in\n key_basenames]\n prefix_monitor_task_ids = ['{}_monitor'.format(basename) for basename in\n prefix_basenames]\n key_metrics_list = []\n for basename, task_id in zip(key_basenames, key_monitor_task_ids):\n key_metrics = context['ti'].xcom_pull(task_ids=task_id, key=\n '{}_key_metrics'.format(basename))\n if key_metrics:\n key_metrics_list.append(key_metrics)\n if key_metrics_list:\n log_metric('key_metrics', key_metrics_list)\n aggregated_key_metrics = {'targets': key_basenames}\n for metrics in key_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split('-')[-1]\n if metric_name in aggregated_key_metrics:\n aggregated_key_metrics[metric_name].append(metric_value)\n else:\n aggregated_key_metrics[metric_name] = [metric_value]\n largest_key_size = max(aggregated_key_metrics['size(MB)'])\n log_metric('largest_key_size(MB)', largest_key_size)\n largest_key = aggregated_key_metrics['targets'][aggregated_key_metrics\n ['size(MB)'].index(largest_key_size)]\n log_metric('largest_key', largest_key)\n prefix_metrics_list = []\n log_metric('prefix task_ids', prefix_monitor_task_ids)\n log_metric('prefix basenames', prefix_basenames)\n for prefix_basename, task_id in zip(prefix_basenames,\n prefix_monitor_task_ids):\n prefix_metrics = context['ti'].xcom_pull(task_ids=task_id, key=\n '{}_prefix_metrics'.format(prefix_basename))\n if prefix_metrics:\n prefix_metrics_list.append(prefix_metrics)\n if prefix_metrics_list:\n log_metric('prefix_metrics', prefix_metrics_list)\n aggregated_prefix_metrics = {'targets': prefix_basenames}\n for metrics in prefix_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split('-')[-1]\n if metric_name in aggregated_prefix_metrics:\n aggregated_prefix_metrics[metric_name].append(metric_value)\n else:\n aggregated_prefix_metrics[metric_name] = [metric_value]\n log_metric('aggregated prefix metrics', aggregated_prefix_metrics)\n largest_prefix_by_mem = max(aggregated_prefix_metrics['total_size(MB)']\n )\n largest_mem_prefix_name = aggregated_prefix_metrics['targets'][\n aggregated_prefix_metrics['total_size(MB)'].index(\n largest_prefix_by_mem)]\n largest_prefix_by_obj_cnt = max(aggregated_prefix_metrics[\n 'object_count'])\n largest_obj_cnt_prefix_name = aggregated_prefix_metrics['targets'][\n aggregated_prefix_metrics['object_count'].index(\n largest_prefix_by_obj_cnt)]\n log_metric('largest_prefix_name', largest_mem_prefix_name)\n log_metric('largest_prefix_size(MB)', largest_prefix_by_mem)\n log_metric('largest_prefix_by_obj_count', largest_prefix_by_obj_cnt)\n log_metric('largest_obj_cnt_prefix_name', largest_obj_cnt_prefix_name)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef parse_s3_uri(URIs):\n \"\"\"parses S3 URIs, seperating out buckets and keys from URI\"\"\"\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split('/')\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n return buckets, keys\n\n\n<function token>\n\n\ndef monitor_S3_prefix(**context):\n \"\"\"\n S3 monitor will monitor for the target prefix(s), collecting the following metrics:\n - total size of prefix (MB)\n - mean key size of prefix (MB)\n - largest key with prefix (MB)\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_prefix = '/'.join(context['prefix'])\n log_metric('prefix', target_prefix)\n target_basename = context['prefix_basename']\n log_metric('basename', target_basename)\n target_bucket = context['bucket']\n log_metric('bucket', target_bucket)\n bucket = s3_hook.get_bucket(target_bucket)\n total_size, num_objs = 0, 0\n obj_sizes, last_modified = [], []\n largest_key_size = 0\n for obj in bucket.objects.filter(Prefix=target_prefix):\n total_size += obj.size\n num_objs += 1\n obj_sizes.append(obj.size)\n if obj.size >= largest_key_size:\n largest_key_size = obj.size\n last_modified.append(obj.last_modified.__str__())\n mean_key_size = total_size / num_objs / MB\n prefix_metrics = {'{}-total_size(MB)'.format(target_basename): \n total_size / MB, '{}-largest_key_size(MB)'.format(target_basename):\n largest_key_size / MB, '{}-mean_key_size(MB)'.format(\n target_basename): mean_key_size / MB, '{}-object_count'.format(\n target_basename): num_objs}\n for metric_name, metric_value in prefix_metrics.items():\n log_metric(metric_name, metric_value)\n context['ti'].xcom_push('{}_prefix_metrics'.format(target_basename),\n prefix_metrics)\n\n\ndef aggregate_and_compare_metrics(**context):\n \"\"\"\n Aggregate and compare metrics. 
This task will log the following metrics:\n - largest key\n - largest key size\n - largest prefix size\n - total size of each prefix monitored\n \"\"\"\n key_basenames = context['key_basenames']\n prefix_basenames = context['prefix_basenames']\n key_monitor_task_ids = ['{}_monitor'.format(basename) for basename in\n key_basenames]\n prefix_monitor_task_ids = ['{}_monitor'.format(basename) for basename in\n prefix_basenames]\n key_metrics_list = []\n for basename, task_id in zip(key_basenames, key_monitor_task_ids):\n key_metrics = context['ti'].xcom_pull(task_ids=task_id, key=\n '{}_key_metrics'.format(basename))\n if key_metrics:\n key_metrics_list.append(key_metrics)\n if key_metrics_list:\n log_metric('key_metrics', key_metrics_list)\n aggregated_key_metrics = {'targets': key_basenames}\n for metrics in key_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split('-')[-1]\n if metric_name in aggregated_key_metrics:\n aggregated_key_metrics[metric_name].append(metric_value)\n else:\n aggregated_key_metrics[metric_name] = [metric_value]\n largest_key_size = max(aggregated_key_metrics['size(MB)'])\n log_metric('largest_key_size(MB)', largest_key_size)\n largest_key = aggregated_key_metrics['targets'][aggregated_key_metrics\n ['size(MB)'].index(largest_key_size)]\n log_metric('largest_key', largest_key)\n prefix_metrics_list = []\n log_metric('prefix task_ids', prefix_monitor_task_ids)\n log_metric('prefix basenames', prefix_basenames)\n for prefix_basename, task_id in zip(prefix_basenames,\n prefix_monitor_task_ids):\n prefix_metrics = context['ti'].xcom_pull(task_ids=task_id, key=\n '{}_prefix_metrics'.format(prefix_basename))\n if prefix_metrics:\n prefix_metrics_list.append(prefix_metrics)\n if prefix_metrics_list:\n log_metric('prefix_metrics', prefix_metrics_list)\n aggregated_prefix_metrics = {'targets': prefix_basenames}\n for metrics in prefix_metrics_list:\n for metric_name, metric_value in metrics.items():\n metric_name = metric_name.split('-')[-1]\n if metric_name in aggregated_prefix_metrics:\n aggregated_prefix_metrics[metric_name].append(metric_value)\n else:\n aggregated_prefix_metrics[metric_name] = [metric_value]\n log_metric('aggregated prefix metrics', aggregated_prefix_metrics)\n largest_prefix_by_mem = max(aggregated_prefix_metrics['total_size(MB)']\n )\n largest_mem_prefix_name = aggregated_prefix_metrics['targets'][\n aggregated_prefix_metrics['total_size(MB)'].index(\n largest_prefix_by_mem)]\n largest_prefix_by_obj_cnt = max(aggregated_prefix_metrics[\n 'object_count'])\n largest_obj_cnt_prefix_name = aggregated_prefix_metrics['targets'][\n aggregated_prefix_metrics['object_count'].index(\n largest_prefix_by_obj_cnt)]\n log_metric('largest_prefix_name', largest_mem_prefix_name)\n log_metric('largest_prefix_size(MB)', largest_prefix_by_mem)\n log_metric('largest_prefix_by_obj_count', largest_prefix_by_obj_cnt)\n log_metric('largest_obj_cnt_prefix_name', largest_obj_cnt_prefix_name)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef parse_s3_uri(URIs):\n \"\"\"parses S3 URIs, seperating out buckets and keys from URI\"\"\"\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split('/')\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n return buckets, keys\n\n\n<function token>\n\n\ndef monitor_S3_prefix(**context):\n \"\"\"\n S3 monitor will monitor for the target prefix(s), collecting the following metrics:\n - total size of prefix (MB)\n - mean key size of prefix (MB)\n - largest key with prefix (MB)\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_prefix = '/'.join(context['prefix'])\n log_metric('prefix', target_prefix)\n target_basename = context['prefix_basename']\n log_metric('basename', target_basename)\n target_bucket = context['bucket']\n log_metric('bucket', target_bucket)\n bucket = s3_hook.get_bucket(target_bucket)\n total_size, num_objs = 0, 0\n obj_sizes, last_modified = [], []\n largest_key_size = 0\n for obj in bucket.objects.filter(Prefix=target_prefix):\n total_size += obj.size\n num_objs += 1\n obj_sizes.append(obj.size)\n if obj.size >= largest_key_size:\n largest_key_size = obj.size\n last_modified.append(obj.last_modified.__str__())\n mean_key_size = total_size / num_objs / MB\n prefix_metrics = {'{}-total_size(MB)'.format(target_basename): \n total_size / MB, '{}-largest_key_size(MB)'.format(target_basename):\n largest_key_size / MB, '{}-mean_key_size(MB)'.format(\n target_basename): mean_key_size / MB, '{}-object_count'.format(\n target_basename): num_objs}\n for metric_name, metric_value in prefix_metrics.items():\n log_metric(metric_name, metric_value)\n context['ti'].xcom_push('{}_prefix_metrics'.format(target_basename),\n prefix_metrics)\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef monitor_S3_prefix(**context):\n \"\"\"\n S3 monitor will monitor for the target prefix(s), collecting the following metrics:\n - total size of prefix (MB)\n - mean key size of prefix (MB)\n - largest key with prefix (MB)\n \"\"\"\n s3_hook = S3Hook(aws_conn_id=AWS_CONN_ID)\n target_prefix = '/'.join(context['prefix'])\n log_metric('prefix', target_prefix)\n target_basename = context['prefix_basename']\n log_metric('basename', target_basename)\n target_bucket = context['bucket']\n log_metric('bucket', target_bucket)\n bucket = s3_hook.get_bucket(target_bucket)\n total_size, num_objs = 0, 0\n obj_sizes, last_modified = [], []\n largest_key_size = 0\n for obj in bucket.objects.filter(Prefix=target_prefix):\n total_size += obj.size\n num_objs += 1\n obj_sizes.append(obj.size)\n if obj.size >= largest_key_size:\n largest_key_size = obj.size\n last_modified.append(obj.last_modified.__str__())\n mean_key_size = total_size / num_objs / MB\n prefix_metrics = {'{}-total_size(MB)'.format(target_basename): \n total_size / MB, '{}-largest_key_size(MB)'.format(target_basename):\n largest_key_size / MB, '{}-mean_key_size(MB)'.format(\n target_basename): mean_key_size / MB, '{}-object_count'.format(\n target_basename): num_objs}\n for metric_name, metric_value in prefix_metrics.items():\n log_metric(metric_name, metric_value)\n context['ti'].xcom_push('{}_prefix_metrics'.format(target_basename),\n prefix_metrics)\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,498 |
b7cb5fbcc92a58e888f853509b8d94d582f7a66f
|
"""
Description: Program to download all available raw data from the Mass.gov website https://www.mass.gov/info-details/archive-of-covid-19-cases-in-massachusetts#related-.
Author: Anthony Badea
Date: June 7, 2020
"""
import os
import sys
import glob
import csv
import io
import requests, zipfile
from collections import OrderedDict
# Download the files associated with date and return the path to the download directory
def download_data(date = "july-23-2020",
out_dir = "../data/MA/raw"):
url = "https://www.mass.gov/doc/covid-19-raw-data-{}/download".format(date)
r = requests.get(url, allow_redirects=True)
	# ZipFile needs a file-like object, so wrap the downloaded bytes in BytesIO
	try: z = zipfile.ZipFile(io.BytesIO(r.content))
	except zipfile.BadZipFile:
		print("Date {} doesn't exist".format(date))
		return None
	out_dir = os.path.join(out_dir,date)
	if not os.path.isdir(out_dir):
		os.mkdir(out_dir)
	z.extractall(out_dir)
	return out_dir  # out_dir already ends in the date; joining date again would double it
# Return dictionary of available dates (key,val) = (date, "")
# Each value is later filled in by download_data_batch with the local directory the files were extracted to
def make_available_data_dict():
	months = ['april','may','june']
	days = range(1,32)
	available_data = OrderedDict()
	for year in ['2020']:
		for month in months:
			for day in days:
				# Invalid dates (e.g. april-31) are tolerated: download_data returns None for them
				available_data[month+'-{}'.format(day)+'-{}'.format(year)] = ""
	return available_data
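
# Editor's sketch (assumption, not in the original): the archive also contains
# later months -- the "july-23-2020" default in download_data suggests the same
# URL scheme holds -- so the month list could simply be extended:
#
#   months = ['april', 'may', 'june', 'july']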
# Download every date in the dictionary produced by make_available_data_dict
# Return a dictionary with (key,val) = (date, local directory of the downloaded data)
def download_data_batch(out_dir = "../data/MA/raw"):
	data_dict = make_available_data_dict()
	if not os.path.isdir(out_dir): os.mkdir(out_dir)
	# Iterate over a copy of the keys: popping from a dict while iterating
	# its live view raises a RuntimeError in Python 3
	for date in list(data_dict.keys()):
		data_dict[date] = download_data(date,out_dir)
		if data_dict[date] is None: data_dict.pop(date)
	return data_dict
#data_dict = download_data_batch()
#print(data_dict)
# Process all data inside of in_dir and produce a single data file
# NOTE (Anthony): work in progress -- the raw-data processing below is unfinished
def process_data(in_dir = "",
out_dir = ""):
# Output data dictionary
data = {}
list_of_dirs = glob.glob(in_dir+'/*')
	if len(list_of_dirs) == 0:
list_of_dirs = [in_dir]
print(list_of_dirs)
for lD in list_of_dirs:
list_of_files = glob.glob(lD+'/*.csv')
		for file in list_of_files:
			# Use a context manager so each CSV file handle is closed promptly
			with open(file) as f:
				for row in csv.DictReader(f):
					print(row)
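
# A minimal sketch of the missing aggregation step (editor's assumption: the
# original process_data only prints rows and never fills `data` or writes out
# a single file). The function name and merge scheme below are hypothetical:
def aggregate_csv_dir(in_dir, out_file):
	rows = []
	for path in glob.glob(in_dir + '/*.csv'):
		with open(path) as f:
			for row in csv.DictReader(f):
				row['source_file'] = os.path.basename(path)  # keep provenance
				rows.append(row)
	if rows:
		# Union of all column names so differently-shaped CSVs can merge;
		# DictWriter fills columns missing from a row with '' by default
		fieldnames = sorted({k for r in rows for k in r})
		with open(out_file, 'w', newline='') as f:
			writer = csv.DictWriter(f, fieldnames=fieldnames)
			writer.writeheader()
			writer.writerows(rows)
	return rows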
download_data()
process_data('../data/MA/raw/july-23-2020')
|
[
"\"\"\"\nDescription: Program to download all available raw data from the Mass.gov website https://www.mass.gov/info-details/archive-of-covid-19-cases-in-massachusetts#related-. \n\nAuthor: Anthony Badea\nDate: June 7, 2020\n\n\"\"\"\n\nimport os\nimport sys\nimport glob\nimport csv\nimport requests, zipfile\nfrom collections import OrderedDict\n\n# Download files associated with date and return string to download directory\ndef download_data(date = \"july-23-2020\",\n\t\t\t\t out_dir = \"../data/MA/raw\"):\n\turl = \"https://www.mass.gov/doc/covid-19-raw-data-{}/download\".format(date)\n\tr = requests.get(url, allow_redirects=True)\n\ttry: z = zipfile.ZipFile(r.content)\n\texcept:\n\t\tprint(\"Date {} doesn't exist\".format(date))\n\t\treturn None\n\tout_dir = os.path.join(out_dir,date)\n\tif os.path.isdir(out_dir) is not True:\n\t\tos.mkdir(out_dir)\n\tz.extractall(out_dir)\n\treturn os.path.join(out_dir,date)\n\t\t\t\t \n\n# Return dictionary of available dates (key,val) = (date, \"\")\n# The value will be updated by download_data into the local directory of the file\ndef make_available_data_dict():\n\tmonths = ['april','may','june']\n\tdays = range(1,32)\n\tavailable_data = OrderedDict()\n\tfor year in ['2020']:\n\t\tfor month in ['april','may','june']:\n\t\t\tfor day in range(1,32):\n\t\t\t\tavailable_data[month+'-{}'.format(day)+'-{}'.format(year)] = \"\"\n\treturn available_data\n\n# Download all days inside of dictionary outputted by make_available_data_dict\n# Return a dictionary with (key,val) = (date, local directory to data)\ndef download_data_batch(out_dir = \"../data/MA/raw\"):\n\tdata_dict = make_available_data_dict()\n\tif os.path.isdir(out_dir) is not True: os.mkdir(out_dir)\n\tfor date in data_dict.keys():\n\t\tdata_dict[date] = download_data(date,out_dir)\n\t\tif data_dict[date] is None: data_dict.pop(date)\n\treturn data_dict\n\n#data_dict = download_data_batch()\n#print(data_dict)\n\n# Process all data inside of in_dir and produce a single data file\n# Anthony you are here processing the raw data!!!\ndef process_data(in_dir = \"\",\n\t\t\t\t out_dir = \"\"):\n\t# Output data dictionary\n\tdata = {}\n\tlist_of_dirs = glob.glob(in_dir+'/*')\n\tif len(list_of_dirs) is 0:\n\t\tlist_of_dirs = [in_dir]\n\tprint(list_of_dirs)\n\tfor lD in list_of_dirs:\n\t\tlist_of_files = glob.glob(lD+'/*.csv')\n\t\tfor file in list_of_files:\n\t\t\tcsv_in = csv.DictReader(open(file))\n\t\t\tfor row in csv_in:\n\t\t\t\tprint(row)\n\ndownload_data()\n\nprocess_data('../data/MA/raw/july-23-2020')\n\n\n\n\n\n\n\n",
"<docstring token>\nimport os\nimport sys\nimport glob\nimport csv\nimport requests, zipfile\nfrom collections import OrderedDict\n\n\ndef download_data(date='july-23-2020', out_dir='../data/MA/raw'):\n url = 'https://www.mass.gov/doc/covid-19-raw-data-{}/download'.format(date)\n r = requests.get(url, allow_redirects=True)\n try:\n z = zipfile.ZipFile(r.content)\n except:\n print(\"Date {} doesn't exist\".format(date))\n return None\n out_dir = os.path.join(out_dir, date)\n if os.path.isdir(out_dir) is not True:\n os.mkdir(out_dir)\n z.extractall(out_dir)\n return os.path.join(out_dir, date)\n\n\ndef make_available_data_dict():\n months = ['april', 'may', 'june']\n days = range(1, 32)\n available_data = OrderedDict()\n for year in ['2020']:\n for month in ['april', 'may', 'june']:\n for day in range(1, 32):\n available_data[month + '-{}'.format(day) + '-{}'.format(year)\n ] = ''\n return available_data\n\n\ndef download_data_batch(out_dir='../data/MA/raw'):\n data_dict = make_available_data_dict()\n if os.path.isdir(out_dir) is not True:\n os.mkdir(out_dir)\n for date in data_dict.keys():\n data_dict[date] = download_data(date, out_dir)\n if data_dict[date] is None:\n data_dict.pop(date)\n return data_dict\n\n\ndef process_data(in_dir='', out_dir=''):\n data = {}\n list_of_dirs = glob.glob(in_dir + '/*')\n if len(list_of_dirs) is 0:\n list_of_dirs = [in_dir]\n print(list_of_dirs)\n for lD in list_of_dirs:\n list_of_files = glob.glob(lD + '/*.csv')\n for file in list_of_files:\n csv_in = csv.DictReader(open(file))\n for row in csv_in:\n print(row)\n\n\ndownload_data()\nprocess_data('../data/MA/raw/july-23-2020')\n",
"<docstring token>\n<import token>\n\n\ndef download_data(date='july-23-2020', out_dir='../data/MA/raw'):\n url = 'https://www.mass.gov/doc/covid-19-raw-data-{}/download'.format(date)\n r = requests.get(url, allow_redirects=True)\n try:\n z = zipfile.ZipFile(r.content)\n except:\n print(\"Date {} doesn't exist\".format(date))\n return None\n out_dir = os.path.join(out_dir, date)\n if os.path.isdir(out_dir) is not True:\n os.mkdir(out_dir)\n z.extractall(out_dir)\n return os.path.join(out_dir, date)\n\n\ndef make_available_data_dict():\n months = ['april', 'may', 'june']\n days = range(1, 32)\n available_data = OrderedDict()\n for year in ['2020']:\n for month in ['april', 'may', 'june']:\n for day in range(1, 32):\n available_data[month + '-{}'.format(day) + '-{}'.format(year)\n ] = ''\n return available_data\n\n\ndef download_data_batch(out_dir='../data/MA/raw'):\n data_dict = make_available_data_dict()\n if os.path.isdir(out_dir) is not True:\n os.mkdir(out_dir)\n for date in data_dict.keys():\n data_dict[date] = download_data(date, out_dir)\n if data_dict[date] is None:\n data_dict.pop(date)\n return data_dict\n\n\ndef process_data(in_dir='', out_dir=''):\n data = {}\n list_of_dirs = glob.glob(in_dir + '/*')\n if len(list_of_dirs) is 0:\n list_of_dirs = [in_dir]\n print(list_of_dirs)\n for lD in list_of_dirs:\n list_of_files = glob.glob(lD + '/*.csv')\n for file in list_of_files:\n csv_in = csv.DictReader(open(file))\n for row in csv_in:\n print(row)\n\n\ndownload_data()\nprocess_data('../data/MA/raw/july-23-2020')\n",
"<docstring token>\n<import token>\n\n\ndef download_data(date='july-23-2020', out_dir='../data/MA/raw'):\n url = 'https://www.mass.gov/doc/covid-19-raw-data-{}/download'.format(date)\n r = requests.get(url, allow_redirects=True)\n try:\n z = zipfile.ZipFile(r.content)\n except:\n print(\"Date {} doesn't exist\".format(date))\n return None\n out_dir = os.path.join(out_dir, date)\n if os.path.isdir(out_dir) is not True:\n os.mkdir(out_dir)\n z.extractall(out_dir)\n return os.path.join(out_dir, date)\n\n\ndef make_available_data_dict():\n months = ['april', 'may', 'june']\n days = range(1, 32)\n available_data = OrderedDict()\n for year in ['2020']:\n for month in ['april', 'may', 'june']:\n for day in range(1, 32):\n available_data[month + '-{}'.format(day) + '-{}'.format(year)\n ] = ''\n return available_data\n\n\ndef download_data_batch(out_dir='../data/MA/raw'):\n data_dict = make_available_data_dict()\n if os.path.isdir(out_dir) is not True:\n os.mkdir(out_dir)\n for date in data_dict.keys():\n data_dict[date] = download_data(date, out_dir)\n if data_dict[date] is None:\n data_dict.pop(date)\n return data_dict\n\n\ndef process_data(in_dir='', out_dir=''):\n data = {}\n list_of_dirs = glob.glob(in_dir + '/*')\n if len(list_of_dirs) is 0:\n list_of_dirs = [in_dir]\n print(list_of_dirs)\n for lD in list_of_dirs:\n list_of_files = glob.glob(lD + '/*.csv')\n for file in list_of_files:\n csv_in = csv.DictReader(open(file))\n for row in csv_in:\n print(row)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n\n\ndef make_available_data_dict():\n months = ['april', 'may', 'june']\n days = range(1, 32)\n available_data = OrderedDict()\n for year in ['2020']:\n for month in ['april', 'may', 'june']:\n for day in range(1, 32):\n available_data[month + '-{}'.format(day) + '-{}'.format(year)\n ] = ''\n return available_data\n\n\ndef download_data_batch(out_dir='../data/MA/raw'):\n data_dict = make_available_data_dict()\n if os.path.isdir(out_dir) is not True:\n os.mkdir(out_dir)\n for date in data_dict.keys():\n data_dict[date] = download_data(date, out_dir)\n if data_dict[date] is None:\n data_dict.pop(date)\n return data_dict\n\n\ndef process_data(in_dir='', out_dir=''):\n data = {}\n list_of_dirs = glob.glob(in_dir + '/*')\n if len(list_of_dirs) is 0:\n list_of_dirs = [in_dir]\n print(list_of_dirs)\n for lD in list_of_dirs:\n list_of_files = glob.glob(lD + '/*.csv')\n for file in list_of_files:\n csv_in = csv.DictReader(open(file))\n for row in csv_in:\n print(row)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\ndef download_data_batch(out_dir='../data/MA/raw'):\n data_dict = make_available_data_dict()\n if os.path.isdir(out_dir) is not True:\n os.mkdir(out_dir)\n for date in data_dict.keys():\n data_dict[date] = download_data(date, out_dir)\n if data_dict[date] is None:\n data_dict.pop(date)\n return data_dict\n\n\ndef process_data(in_dir='', out_dir=''):\n data = {}\n list_of_dirs = glob.glob(in_dir + '/*')\n if len(list_of_dirs) is 0:\n list_of_dirs = [in_dir]\n print(list_of_dirs)\n for lD in list_of_dirs:\n list_of_files = glob.glob(lD + '/*.csv')\n for file in list_of_files:\n csv_in = csv.DictReader(open(file))\n for row in csv_in:\n print(row)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef process_data(in_dir='', out_dir=''):\n data = {}\n list_of_dirs = glob.glob(in_dir + '/*')\n if len(list_of_dirs) is 0:\n list_of_dirs = [in_dir]\n print(list_of_dirs)\n for lD in list_of_dirs:\n list_of_files = glob.glob(lD + '/*.csv')\n for file in list_of_files:\n csv_in = csv.DictReader(open(file))\n for row in csv_in:\n print(row)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,499 |
a0d891c1c06b1f3da10e8ef79ec4b77a8192c8a9
|
# -*- coding: utf-8 -*-
from ecl.tests.functional import base
from ecl.network.v2 import interdc as _interdc
class TestInterDCService(base.BaseFunctionalTest):
@classmethod
def setUpClass(cls):
super(TestInterDCService, cls).setUpClass()
cls.one_idc_service = None
idc_services = cls.conn.network.interdc_services()
if idc_services and len(idc_services) > 0:
cls.one_idc_service = idc_services[0]
def test_get(self):
sot = self.conn.network.get_interdc_service(self.one_idc_service.id)
self.assertEqual(self.one_idc_service.id, sot.id)
class TestInterDCGateway(base.BaseFunctionalTest):
@classmethod
def setUpClass(cls):
super(TestInterDCGateway, cls).setUpClass()
cls.one_idc_gateway = None
idc_gws = cls.conn.network.interdc_gateways()
if idc_gws and len(idc_gws) > 0:
cls.one_idc_gateway = idc_gws[0]
def test_get(self):
sot = self.conn.network.get_interdc_gateway(self.one_idc_gateway.id)
self.assertEqual(self.one_idc_gateway.id, sot.id)
def test_find_by_id(self):
        sot = self.conn.network.find_interdc_gateway(
self.one_idc_gateway.id
)
self.assertEqual(self.one_idc_gateway.id, sot.id)
class TestInterDCInterface(base.BaseFunctionalTest):
@classmethod
def prepare_create(cls):
interdc_gws = cls.conn.network.interdc_gateways()
if interdc_gws and len(interdc_gws) > 0:
cls.one_idc_gateway = interdc_gws[0]
network = cls.conn.network.networks()
for nw in network:
if nw.name == "test-network":
cls.one_network = nw
break
@classmethod
def setUpClass(cls):
super(TestInterDCInterface, cls).setUpClass()
cls.one_idc_interface = None
cls.one_idc_gateway = None
cls.one_idc_gateway_name = "N000001996_V15000001"
cls.one_idc_interface_name = "interdc_interface_for_tenant-sdpgui01"
cls.one_network = None
idc_ifs = cls.conn.network.interdc_interfaces()
for idcif in idc_ifs:
if idcif.name == cls.one_idc_interface_name:
cls.one_idc_interface = idcif
if cls.one_idc_interface is None:
cls.prepare_create()
cls.one_idc_interface = cls.conn.network.create_interdc_interface(
cls.one_idc_gateway.id,
"xxx", "xxx", "xxx", "xxx", 1,
name=cls.one_idc_interface_name, )
def test_get(self):
sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id)
self.assertEqual(self.one_idc_interface.id, sot.id)
def test_create(self):
if self.one_idc_gateway is None or self.one_network is None:
self.prepare_create()
self.one_idc_interface = self.conn.network.create_interdc_interface(
self.one_idc_gateway.id,
"xxx", "xxx", "xxx", "xxx", 1,
name=self.one_idc_interface_name, )
self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface)
def test_update(self):
sot = self.conn.network.update_interdc_interface(self.one_idc_interface.id,
description="xxx")
self.assertIsInstance(sot, _interdc.InterDCInterface)
def test_delete(self):
sot = self.conn.network.delete_interdc_interface(self.one_idc_interface.id)
# self.assertIsInstance(sot, _interdc.InterDCInterface)
|
[
"# -*- coding: utf-8 -*-\n\nfrom ecl.tests.functional import base\nfrom ecl.network.v2 import interdc as _interdc\n\n\nclass TestInterDCService(base.BaseFunctionalTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCService, cls).setUpClass()\n cls.one_idc_service = None\n idc_services = cls.conn.network.interdc_services()\n if idc_services and len(idc_services) > 0:\n cls.one_idc_service = idc_services[0]\n\n def test_get(self):\n sot = self.conn.network.get_interdc_service(self.one_idc_service.id)\n self.assertEqual(self.one_idc_service.id, sot.id)\n\n\nclass TestInterDCGateway(base.BaseFunctionalTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCGateway, cls).setUpClass()\n cls.one_idc_gateway = None\n idc_gws = cls.conn.network.interdc_gateways()\n if idc_gws and len(idc_gws) > 0:\n cls.one_idc_gateway = idc_gws[0]\n\n def test_get(self):\n sot = self.conn.network.get_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n def test_find_by_id(self):\n sot = set.conn.network.find_interdc_gateway(\n self.one_idc_gateway.id\n )\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == \"test-network\":\n cls.one_network = nw\n break\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCInterface, cls).setUpClass()\n cls.one_idc_interface = None\n cls.one_idc_gateway = None\n cls.one_idc_gateway_name = \"N000001996_V15000001\"\n cls.one_idc_interface_name = \"interdc_interface_for_tenant-sdpgui01\"\n cls.one_network = None\n\n idc_ifs = cls.conn.network.interdc_interfaces()\n for idcif in idc_ifs:\n if idcif.name == cls.one_idc_interface_name:\n cls.one_idc_interface = idcif\n if cls.one_idc_interface is None:\n cls.prepare_create()\n cls.one_idc_interface = cls.conn.network.create_interdc_interface(\n cls.one_idc_gateway.id,\n \"xxx\", \"xxx\", \"xxx\", \"xxx\", 1,\n name=cls.one_idc_interface_name, )\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id)\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id,\n \"xxx\", \"xxx\", \"xxx\", \"xxx\", 1,\n name=self.one_idc_interface_name, )\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface)\n\n def test_update(self):\n sot = self.conn.network.update_interdc_interface(self.one_idc_interface.id,\n description=\"xxx\")\n self.assertIsInstance(sot, _interdc.InterDCInterface)\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.one_idc_interface.id)\n # self.assertIsInstance(sot, _interdc.InterDCInterface)\n",
"from ecl.tests.functional import base\nfrom ecl.network.v2 import interdc as _interdc\n\n\nclass TestInterDCService(base.BaseFunctionalTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCService, cls).setUpClass()\n cls.one_idc_service = None\n idc_services = cls.conn.network.interdc_services()\n if idc_services and len(idc_services) > 0:\n cls.one_idc_service = idc_services[0]\n\n def test_get(self):\n sot = self.conn.network.get_interdc_service(self.one_idc_service.id)\n self.assertEqual(self.one_idc_service.id, sot.id)\n\n\nclass TestInterDCGateway(base.BaseFunctionalTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCGateway, cls).setUpClass()\n cls.one_idc_gateway = None\n idc_gws = cls.conn.network.interdc_gateways()\n if idc_gws and len(idc_gws) > 0:\n cls.one_idc_gateway = idc_gws[0]\n\n def test_get(self):\n sot = self.conn.network.get_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n def test_find_by_id(self):\n sot = set.conn.network.find_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == 'test-network':\n cls.one_network = nw\n break\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCInterface, cls).setUpClass()\n cls.one_idc_interface = None\n cls.one_idc_gateway = None\n cls.one_idc_gateway_name = 'N000001996_V15000001'\n cls.one_idc_interface_name = 'interdc_interface_for_tenant-sdpgui01'\n cls.one_network = None\n idc_ifs = cls.conn.network.interdc_interfaces()\n for idcif in idc_ifs:\n if idcif.name == cls.one_idc_interface_name:\n cls.one_idc_interface = idcif\n if cls.one_idc_interface is None:\n cls.prepare_create()\n cls.one_idc_interface = cls.conn.network.create_interdc_interface(\n cls.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name\n =cls.one_idc_interface_name)\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n\n def test_update(self):\n sot = self.conn.network.update_interdc_interface(self.\n one_idc_interface.id, description='xxx')\n self.assertIsInstance(sot, _interdc.InterDCInterface)\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n\n\nclass TestInterDCService(base.BaseFunctionalTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCService, cls).setUpClass()\n cls.one_idc_service = None\n idc_services = cls.conn.network.interdc_services()\n if idc_services and len(idc_services) > 0:\n cls.one_idc_service = idc_services[0]\n\n def test_get(self):\n sot = self.conn.network.get_interdc_service(self.one_idc_service.id)\n self.assertEqual(self.one_idc_service.id, sot.id)\n\n\nclass TestInterDCGateway(base.BaseFunctionalTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCGateway, cls).setUpClass()\n cls.one_idc_gateway = None\n idc_gws = cls.conn.network.interdc_gateways()\n if idc_gws and len(idc_gws) > 0:\n cls.one_idc_gateway = idc_gws[0]\n\n def test_get(self):\n sot = self.conn.network.get_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n def test_find_by_id(self):\n sot = set.conn.network.find_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == 'test-network':\n cls.one_network = nw\n break\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCInterface, cls).setUpClass()\n cls.one_idc_interface = None\n cls.one_idc_gateway = None\n cls.one_idc_gateway_name = 'N000001996_V15000001'\n cls.one_idc_interface_name = 'interdc_interface_for_tenant-sdpgui01'\n cls.one_network = None\n idc_ifs = cls.conn.network.interdc_interfaces()\n for idcif in idc_ifs:\n if idcif.name == cls.one_idc_interface_name:\n cls.one_idc_interface = idcif\n if cls.one_idc_interface is None:\n cls.prepare_create()\n cls.one_idc_interface = cls.conn.network.create_interdc_interface(\n cls.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name\n =cls.one_idc_interface_name)\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n\n def test_update(self):\n sot = self.conn.network.update_interdc_interface(self.\n one_idc_interface.id, description='xxx')\n self.assertIsInstance(sot, _interdc.InterDCInterface)\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n\n\nclass TestInterDCService(base.BaseFunctionalTest):\n <function token>\n\n def test_get(self):\n sot = self.conn.network.get_interdc_service(self.one_idc_service.id)\n self.assertEqual(self.one_idc_service.id, sot.id)\n\n\nclass TestInterDCGateway(base.BaseFunctionalTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCGateway, cls).setUpClass()\n cls.one_idc_gateway = None\n idc_gws = cls.conn.network.interdc_gateways()\n if idc_gws and len(idc_gws) > 0:\n cls.one_idc_gateway = idc_gws[0]\n\n def test_get(self):\n sot = self.conn.network.get_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n def test_find_by_id(self):\n sot = set.conn.network.find_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == 'test-network':\n cls.one_network = nw\n break\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCInterface, cls).setUpClass()\n cls.one_idc_interface = None\n cls.one_idc_gateway = None\n cls.one_idc_gateway_name = 'N000001996_V15000001'\n cls.one_idc_interface_name = 'interdc_interface_for_tenant-sdpgui01'\n cls.one_network = None\n idc_ifs = cls.conn.network.interdc_interfaces()\n for idcif in idc_ifs:\n if idcif.name == cls.one_idc_interface_name:\n cls.one_idc_interface = idcif\n if cls.one_idc_interface is None:\n cls.prepare_create()\n cls.one_idc_interface = cls.conn.network.create_interdc_interface(\n cls.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name\n =cls.one_idc_interface_name)\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n\n def test_update(self):\n sot = self.conn.network.update_interdc_interface(self.\n one_idc_interface.id, description='xxx')\n self.assertIsInstance(sot, _interdc.InterDCInterface)\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n\n\nclass TestInterDCService(base.BaseFunctionalTest):\n <function token>\n <function token>\n\n\nclass TestInterDCGateway(base.BaseFunctionalTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCGateway, cls).setUpClass()\n cls.one_idc_gateway = None\n idc_gws = cls.conn.network.interdc_gateways()\n if idc_gws and len(idc_gws) > 0:\n cls.one_idc_gateway = idc_gws[0]\n\n def test_get(self):\n sot = self.conn.network.get_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n def test_find_by_id(self):\n sot = set.conn.network.find_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == 'test-network':\n cls.one_network = nw\n break\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCInterface, cls).setUpClass()\n cls.one_idc_interface = None\n cls.one_idc_gateway = None\n cls.one_idc_gateway_name = 'N000001996_V15000001'\n cls.one_idc_interface_name = 'interdc_interface_for_tenant-sdpgui01'\n cls.one_network = None\n idc_ifs = cls.conn.network.interdc_interfaces()\n for idcif in idc_ifs:\n if idcif.name == cls.one_idc_interface_name:\n cls.one_idc_interface = idcif\n if cls.one_idc_interface is None:\n cls.prepare_create()\n cls.one_idc_interface = cls.conn.network.create_interdc_interface(\n cls.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name\n =cls.one_idc_interface_name)\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n\n def test_update(self):\n sot = self.conn.network.update_interdc_interface(self.\n one_idc_interface.id, description='xxx')\n self.assertIsInstance(sot, _interdc.InterDCInterface)\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n<class token>\n\n\nclass TestInterDCGateway(base.BaseFunctionalTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCGateway, cls).setUpClass()\n cls.one_idc_gateway = None\n idc_gws = cls.conn.network.interdc_gateways()\n if idc_gws and len(idc_gws) > 0:\n cls.one_idc_gateway = idc_gws[0]\n\n def test_get(self):\n sot = self.conn.network.get_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n def test_find_by_id(self):\n sot = set.conn.network.find_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == 'test-network':\n cls.one_network = nw\n break\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCInterface, cls).setUpClass()\n cls.one_idc_interface = None\n cls.one_idc_gateway = None\n cls.one_idc_gateway_name = 'N000001996_V15000001'\n cls.one_idc_interface_name = 'interdc_interface_for_tenant-sdpgui01'\n cls.one_network = None\n idc_ifs = cls.conn.network.interdc_interfaces()\n for idcif in idc_ifs:\n if idcif.name == cls.one_idc_interface_name:\n cls.one_idc_interface = idcif\n if cls.one_idc_interface is None:\n cls.prepare_create()\n cls.one_idc_interface = cls.conn.network.create_interdc_interface(\n cls.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name\n =cls.one_idc_interface_name)\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n\n def test_update(self):\n sot = self.conn.network.update_interdc_interface(self.\n one_idc_interface.id, description='xxx')\n self.assertIsInstance(sot, _interdc.InterDCInterface)\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n<class token>\n\n\nclass TestInterDCGateway(base.BaseFunctionalTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCGateway, cls).setUpClass()\n cls.one_idc_gateway = None\n idc_gws = cls.conn.network.interdc_gateways()\n if idc_gws and len(idc_gws) > 0:\n cls.one_idc_gateway = idc_gws[0]\n\n def test_get(self):\n sot = self.conn.network.get_interdc_gateway(self.one_idc_gateway.id)\n self.assertEqual(self.one_idc_gateway.id, sot.id)\n <function token>\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == 'test-network':\n cls.one_network = nw\n break\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCInterface, cls).setUpClass()\n cls.one_idc_interface = None\n cls.one_idc_gateway = None\n cls.one_idc_gateway_name = 'N000001996_V15000001'\n cls.one_idc_interface_name = 'interdc_interface_for_tenant-sdpgui01'\n cls.one_network = None\n idc_ifs = cls.conn.network.interdc_interfaces()\n for idcif in idc_ifs:\n if idcif.name == cls.one_idc_interface_name:\n cls.one_idc_interface = idcif\n if cls.one_idc_interface is None:\n cls.prepare_create()\n cls.one_idc_interface = cls.conn.network.create_interdc_interface(\n cls.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name\n =cls.one_idc_interface_name)\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n\n def test_update(self):\n sot = self.conn.network.update_interdc_interface(self.\n one_idc_interface.id, description='xxx')\n self.assertIsInstance(sot, _interdc.InterDCInterface)\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n<class token>\n\n\nclass TestInterDCGateway(base.BaseFunctionalTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCGateway, cls).setUpClass()\n cls.one_idc_gateway = None\n idc_gws = cls.conn.network.interdc_gateways()\n if idc_gws and len(idc_gws) > 0:\n cls.one_idc_gateway = idc_gws[0]\n <function token>\n <function token>\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == 'test-network':\n cls.one_network = nw\n break\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCInterface, cls).setUpClass()\n cls.one_idc_interface = None\n cls.one_idc_gateway = None\n cls.one_idc_gateway_name = 'N000001996_V15000001'\n cls.one_idc_interface_name = 'interdc_interface_for_tenant-sdpgui01'\n cls.one_network = None\n idc_ifs = cls.conn.network.interdc_interfaces()\n for idcif in idc_ifs:\n if idcif.name == cls.one_idc_interface_name:\n cls.one_idc_interface = idcif\n if cls.one_idc_interface is None:\n cls.prepare_create()\n cls.one_idc_interface = cls.conn.network.create_interdc_interface(\n cls.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name\n =cls.one_idc_interface_name)\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n\n def test_update(self):\n sot = self.conn.network.update_interdc_interface(self.\n one_idc_interface.id, description='xxx')\n self.assertIsInstance(sot, _interdc.InterDCInterface)\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n<class token>\n\n\nclass TestInterDCGateway(base.BaseFunctionalTest):\n <function token>\n <function token>\n <function token>\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == 'test-network':\n cls.one_network = nw\n break\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCInterface, cls).setUpClass()\n cls.one_idc_interface = None\n cls.one_idc_gateway = None\n cls.one_idc_gateway_name = 'N000001996_V15000001'\n cls.one_idc_interface_name = 'interdc_interface_for_tenant-sdpgui01'\n cls.one_network = None\n idc_ifs = cls.conn.network.interdc_interfaces()\n for idcif in idc_ifs:\n if idcif.name == cls.one_idc_interface_name:\n cls.one_idc_interface = idcif\n if cls.one_idc_interface is None:\n cls.prepare_create()\n cls.one_idc_interface = cls.conn.network.create_interdc_interface(\n cls.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name\n =cls.one_idc_interface_name)\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n\n def test_update(self):\n sot = self.conn.network.update_interdc_interface(self.\n one_idc_interface.id, description='xxx')\n self.assertIsInstance(sot, _interdc.InterDCInterface)\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == 'test-network':\n cls.one_network = nw\n break\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCInterface, cls).setUpClass()\n cls.one_idc_interface = None\n cls.one_idc_gateway = None\n cls.one_idc_gateway_name = 'N000001996_V15000001'\n cls.one_idc_interface_name = 'interdc_interface_for_tenant-sdpgui01'\n cls.one_network = None\n idc_ifs = cls.conn.network.interdc_interfaces()\n for idcif in idc_ifs:\n if idcif.name == cls.one_idc_interface_name:\n cls.one_idc_interface = idcif\n if cls.one_idc_interface is None:\n cls.prepare_create()\n cls.one_idc_interface = cls.conn.network.create_interdc_interface(\n cls.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name\n =cls.one_idc_interface_name)\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n\n def test_update(self):\n sot = self.conn.network.update_interdc_interface(self.\n one_idc_interface.id, description='xxx')\n self.assertIsInstance(sot, _interdc.InterDCInterface)\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == 'test-network':\n cls.one_network = nw\n break\n\n @classmethod\n def setUpClass(cls):\n super(TestInterDCInterface, cls).setUpClass()\n cls.one_idc_interface = None\n cls.one_idc_gateway = None\n cls.one_idc_gateway_name = 'N000001996_V15000001'\n cls.one_idc_interface_name = 'interdc_interface_for_tenant-sdpgui01'\n cls.one_network = None\n idc_ifs = cls.conn.network.interdc_interfaces()\n for idcif in idc_ifs:\n if idcif.name == cls.one_idc_interface_name:\n cls.one_idc_interface = idcif\n if cls.one_idc_interface is None:\n cls.prepare_create()\n cls.one_idc_interface = cls.conn.network.create_interdc_interface(\n cls.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name\n =cls.one_idc_interface_name)\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n <function token>\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n\n @classmethod\n def prepare_create(cls):\n interdc_gws = cls.conn.network.interdc_gateways()\n if interdc_gws and len(interdc_gws) > 0:\n cls.one_idc_gateway = interdc_gws[0]\n network = cls.conn.network.networks()\n for nw in network:\n if nw.name == 'test-network':\n cls.one_network = nw\n break\n <function token>\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n <function token>\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n <function token>\n <function token>\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n\n def test_create(self):\n if self.one_idc_gateway is None or self.one_network is None:\n self.prepare_create()\n self.one_idc_interface = self.conn.network.create_interdc_interface(\n self.one_idc_gateway.id, 'xxx', 'xxx', 'xxx', 'xxx', 1, name=\n self.one_idc_interface_name)\n self.assertIsInstance(self.one_idc_interface, _interdc.InterDCInterface\n )\n <function token>\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n <function token>\n <function token>\n\n def test_get(self):\n sot = self.conn.network.get_interdc_interface(self.one_idc_interface.id\n )\n self.assertEqual(self.one_idc_interface.id, sot.id)\n <function token>\n <function token>\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_delete(self):\n sot = self.conn.network.delete_interdc_interface(self.\n one_idc_interface.id)\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestInterDCInterface(base.BaseFunctionalTest):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n"
] | false |