import FWCore.ParameterSet.Config as cms
import copy

process = cms.Process("ProcessOne")

##
## MessageLogger
##
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.enable = False
process.MessageLogger.AlignPCLThresholdsWriter = dict()
process.MessageLogger.AlignPCLThresholds = dict()
process.MessageLogger.cout = cms.untracked.PSet(
    enable = cms.untracked.bool(True),
    enableStatistics = cms.untracked.bool(True),
    threshold = cms.untracked.string("INFO"),
    default = cms.untracked.PSet(limit = cms.untracked.int32(0)),
    FwkReport = cms.untracked.PSet(limit = cms.untracked.int32(-1),
                                   reportEvery = cms.untracked.int32(1000)),
    AlignPCLThresholdsWriter = cms.untracked.PSet(limit = cms.untracked.int32(-1)),
    AlignPCLThresholds = cms.untracked.PSet(limit = cms.untracked.int32(-1)))

##
## Empty source
##
process.source = cms.Source("EmptyIOVSource",
                            timetype = cms.string('runnumber'),
                            firstValue = cms.uint64(1),
                            lastValue = cms.uint64(1),
                            interval = cms.uint64(1))

##
## Database output service
##
process.load("CondCore.CondDB.CondDB_cfi")

##
## Output database (in this case a local sqlite file)
##
process.CondDB.connect = 'sqlite_file:mythresholds.db'
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
                                          process.CondDB,
                                          timetype = cms.untracked.string('runnumber'),
                                          toPut = cms.VPSet(cms.PSet(record = cms.string('AlignPCLThresholdsRcd'),
                                                                     tag = cms.string('PCLThresholds_express_v0'))))

##
## Import the thresholds configuration
##
import CondFormats.PCLConfig.Thresholds_cff as Thresholds

##
## Example of how to add extra degrees of freedom to the default
##
AddSurfaceThresholds = copy.deepcopy(Thresholds.default)

BPixSurface = cms.VPSet(
    cms.PSet(alignableId = cms.string("TPBModule"),
             DOF = cms.string("Surface1"),
             cut = cms.double(0.1),
             sigCut = cms.double(0.1),
             maxMoveCut = cms.double(0.1),
             maxErrorCut = cms.double(10.0)))

DefaultPlusSurface = AddSurfaceThresholds + BPixSurface
# print(DefaultPlusSurface.dumpPython())

process.WriteInDB = cms.EDAnalyzer("AlignPCLThresholdsWriter",
                                   record = cms.string('AlignPCLThresholdsRcd'),
                                   ### minimum number of records found in pede output
                                   minNRecords = cms.uint32(25000),
                                   # thresholds = cms.VPSet()          # empty object
                                   # thresholds = DefaultPlusSurface   # add extra degrees of freedom
                                   thresholds = Thresholds.default     # as a cms.VPSet
                                   )

process.p = cms.Path(process.WriteInDB)
# -*- coding: utf-8 -*-
"""
Module implementing MainWindow.
"""
import os
import json
import sys
from glob import glob

from PyQt5 import QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import *
from PyQt5.QtWidgets import QMainWindow
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas

from .RadarUI import Ui_MainWindow
from .station_info import Ui_Dialog
from ..io import read_auto
from ..io.util import radar_format
from ..draw.SingleRadarPlot import RadarGraph
from ..draw.SingleRadarPlotMap import RadarGraphMap
from ..draw.VerticalSectionPlot import VerticalSection
from ..configure.location_config import last_open_dir

field_name = ["dBZ", "V", "W", "ZDR", "KDP", "CC"]


class LineBuilder:
    """Collect two clicked endpoints on the PPI and pop up a vertical cross-section."""

    def __init__(self, fig, ax, radar_data, product, map_bool):
        self.ax = ax
        self.xs = []
        self.ys = []
        self.fig = fig
        self.map = map_bool
        self.cid = self.fig.canvas.mpl_connect('button_press_event', self)
        self.cursor = self.fig.canvas.mpl_connect('motion_notify_event', self.mouse_move)
        self.radar_dat = radar_data
        self.product = product

    def __call__(self, event):
        if len(self.xs) < 2:
            self.xs.append(event.xdata)
            self.ys.append(event.ydata)
            if len(self.xs) == 1:
                self.start = self.ax.scatter(event.xdata, event.ydata, color="r",
                                             marker="+", s=60, zorder=len(self.xs) + 10)
            else:
                self.end = self.ax.scatter(event.xdata, event.ydata, color="r",
                                           marker="+", s=60, zorder=len(self.xs) + 10)
                self.rline = self.ax.plot(self.xs, self.ys, color="r", linewidth=1, zorder=13)
                cv = FigureCanvas(Figure(figsize=(8, 6)))
                ax = cv.figure.add_axes([0.1, 0.3, 0.8, 0.6])
                cax = cv.figure.add_axes([0.1, 0.1, 0.8, 0.06])
                if not self.map:
                    VerticalSection.GUI_section(cv.figure, ax, cax, self.radar_dat,
                                                [self.xs[0] * 1000, self.ys[0] * 1000],
                                                [self.xs[1] * 1000, self.ys[1] * 1000],
                                                field_name[self.product])
                else:
                    VerticalSection.GUI_section_map(cv.figure, ax, cax, self.radar_dat,
                                                    [self.xs[0], self.ys[0]],
                                                    [self.xs[1], self.ys[1]],
                                                    field_name[self.product])
                cv.show()
            self.fig.canvas.draw()
        else:
            # Third click: clear the previous section line and start a new one.
            self.rline[0].remove()
            self.start.remove()
            self.end.remove()
            self.xs = []
            self.ys = []
            self.xs.append(event.xdata)
            self.ys.append(event.ydata)
            self.start = self.ax.scatter(event.xdata, event.ydata, color="r",
                                         marker="+", s=60, zorder=len(self.xs) + 10)
            self.fig.canvas.draw()

    def mouse_move(self, event):
        try:
            self.move_line[0].remove()
        except Exception:
            pass
        if len(self.xs) == 1:
            self.move_line = self.ax.plot([self.xs[0], event.xdata],
                                          [self.ys[0], event.ydata],
                                          color="r", linewidth=1, linestyle="--", zorder=100)
        self.fig.canvas.draw()


class Dialog(QDialog, Ui_Dialog):
    """
    Class documentation goes here.
    """

    def __init__(self, parent=None):
        """
        Constructor

        @param parent reference to the parent widget
        @type QWidget
        """
        super(Dialog, self).__init__(parent)
        self.setupUi(self)

    @pyqtSlot()
    def on_pushButton_clicked(self):
        """Accept the edited station coordinates and close."""
        self.lon = float(self.lineEdit.text())
        self.lat = float(self.lineEdit_2.text())
        self.height = float(self.lineEdit_3.text())
        self.close()

    @pyqtSlot()
    def on_pushButton_2_clicked(self):
        """Cancel and close."""
        self.close()

    @pyqtSlot()
    def on_toolButton_clicked(self):
        """Prompt for the station longitude (degrees)."""
        lon, LonTrue = QInputDialog.getDouble(self, r"经度", "雷达站点经度(单位:度)", 131.3, -180, 180)
        if LonTrue:
            self.lineEdit.setText(str(lon))

    @pyqtSlot()
    def on_toolButton_2_clicked(self):
        """Prompt for the station latitude (degrees)."""
        lat, LatTrue = QInputDialog.getDouble(self, r"纬度", "雷达站点纬度(单位:度)", 23, -90, 90)
        if LatTrue:
            self.lineEdit_2.setText(str(lat))

    @pyqtSlot()
    def on_toolButton_3_clicked(self):
        """Prompt for the station altitude (meters)."""
        height, HeightTrue = QInputDialog.getDouble(self, r"高度", "雷达站点高度(单位:米)", 57, -2000, 5000)
        if HeightTrue:
            self.lineEdit_3.setText(str(height))


class MainWindow(QMainWindow, Ui_MainWindow):
    """
    Class documentation goes here.
    """

    def __init__(self, parent=None):
        """
        Constructor

        @param parent reference to the parent widget
        @type QWidget
        """
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.lastOpenDir = self.open_last_opendir()
        self.radar_dat = None
        self.dualPOL = False
        self.openbasename = None
        self.files = None
        self.radar_type = None
        self.org_lat = 23
        self.org_lon = 131.3
        self.org_height = 57

    def open_last_opendir(self):
        """Restore the directory of the last opened file."""
        with open(last_open_dir, "r") as f:
            dir_dict = json.load(f)
        return dir_dict["lastOpenDir"]

    def write_last_opendir(self, filedir):
        """Persist the last opened directory to the JSON config file."""
        with open(last_open_dir, "w") as f:
            json.dump({"lastOpenDir": filedir}, f)

    @pyqtSlot()
    def on_actionvertical_changed(self):
        """Toggle drawing of the vertical cross-section."""
        if self.actionvertical.isChecked():
            try:
                self.linebuilder = LineBuilder(self.fig, self.ax, self.radar_dat,
                                               self.find_var_in_groupBox(),
                                               self.actionwithmap.isChecked())
                self.clickevent = True
            except AttributeError:
                pass
        else:
            self.fig.canvas.mpl_disconnect(self.linebuilder.cid)
            self.fig.canvas.mpl_disconnect(self.linebuilder.cursor)
            self.linebuilder.rline[0].remove()
            self.linebuilder.start.remove()
            self.linebuilder.end.remove()
            self.fig.canvas.draw()

    @pyqtSlot()
    def on_actionwithmap_changed(self):
        """
        Slot documentation goes here.
        """
        pass

    @pyqtSlot()
    def on_actioncontinuous_changed(self):
        """
        Slot documentation goes here.
        """
        pass

    def Read_radar(self, filename):
        if radar_format(filename) is not None:
            NRadar = read_auto(filename)
            self.org_lat = NRadar.scan_info.latitude.values
            self.org_lon = NRadar.scan_info.longitude.values
            self.org_height = NRadar.scan_info.altitude.values
            if "KDP" in NRadar.fields[0].keys():
                self.open_dual()
            else:
                self.close_non_dual()
            return NRadar
        else:
            QMessageBox.warning(self, "数据错误警告", "非SA/SB/CA/CB/98D/CC/CCJ/SC/CD数据",
                                QMessageBox.Yes)
            return 0

    def close_non_dual(self):
        """Hide the dual-polarization variables for single-pol radars."""
        self.radioButton_13.hide()
        self.radioButton_14.hide()
        self.radioButton_15.hide()

    def open_dual(self):
        """Show the dual-polarization variables."""
        self.radioButton_13.show()
        self.radioButton_14.show()
        self.radioButton_15.show()

    def setSelected(self, filename):
        """Highlight the currently selected file in the list."""
        basename = os.path.basename(filename)
        self.openbasename = basename
        items = self.listWidget.findItems(basename, Qt.MatchExactly)
        if len(items) > 0:
            for item in items:
                self.listWidget.setCurrentItem(item)

    def import_basedat(self, direc):
        """Find all radar files in a directory and return their basenames as a list."""
        self.lastOpenDir = direc
        self.write_last_opendir(direc)
        extensions = ["*.*A", "*.*V", "*.bz2", "*.bin", "*.AR2", "*.gz", "*.GZ"]
        files = []
        for iextend in extensions:
            file = glob(os.path.join(direc, iextend))
            files.extend(file)
        return [os.path.basename(ifile) for ifile in files]

    def add_listwidget(self, files):
        """Populate the listWidget with the given files."""
        self.listWidget.clear()
        for item in files:
            self.listWidget.addItem(item)

    def _load_and_show(self, filename):
        """Read a radar file and, on success, highlight it and redraw the PPI."""
        self.radar_dat = self.Read_radar(filename)
        if self.radar_dat != 0:
            self.setSelected(filename)
            self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(),
                                self.find_var_in_groupBox(),
                                self.actionwithmap.isChecked(),
                                self.actioncontinuous.isChecked())

    def _replot_if_loaded(self):
        """Redraw the PPI with the current level/variable/map settings."""
        if self.radar_dat is not None:
            self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(),
                                self.find_var_in_groupBox(),
                                self.actionwithmap.isChecked(),
                                self.actioncontinuous.isChecked())

    @pyqtSlot(QListWidgetItem)
    def on_listWidget_itemDoubleClicked(self, item):
        """
        Slot documentation goes here.

        @param item DESCRIPTION
        @type QListWidgetItem
        """
        self._load_and_show(self.lastOpenDir + os.sep + item.text())

    @pyqtSlot()
    def on_actionopen_2_triggered(self):
        """Open a single radar file and list its siblings."""
        if self.lastOpenDir and os.path.exists(self.lastOpenDir):
            defaultOpenDirPath = self.lastOpenDir
        else:
            defaultOpenDirPath = '.'
        filename = QFileDialog.getOpenFileName(self, "打开一个雷达基数据", defaultOpenDirPath,
                                               "天气雷达基数据(*bin *bz2 *A *V *BIN *BZ2 *AR2 *GZ *gz)")
        ReadFile = filename[0]
        if ReadFile.strip() == "":
            return
        PathDir = os.path.dirname(ReadFile)
        self.files = self.import_basedat(PathDir)
        self.add_listwidget(self.files)
        self._load_and_show(ReadFile)

    @pyqtSlot()
    def on_actionopendir_2_triggered(self):
        """Open a directory of radar files."""
        if self.lastOpenDir and os.path.exists(self.lastOpenDir):
            defaultOpenDirPath = self.lastOpenDir
        else:
            defaultOpenDirPath = '.'
        self.targetDirPath = QFileDialog.getExistingDirectory(
            self, "打开新一代天气雷达数据文件夹", defaultOpenDirPath,
            QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
        if self.targetDirPath.strip() == '':
            return
        self.files = self.import_basedat(self.targetDirPath)
        self.add_listwidget(self.files)

    @pyqtSlot()
    def on_actionquit_2_triggered(self):
        """Quit the application."""
        sys.exit(0)

    @pyqtSlot()
    def on_actionstation_triggered(self):
        """Edit the radar station coordinates."""
        self.my_info = Dialog()
        self.my_info.lineEdit.setText(str(self.org_lon))
        self.my_info.lineEdit_2.setText(str(self.org_lat))
        self.my_info.lineEdit_3.setText(str(self.org_height))
        self.my_info.lat = self.org_lat
        self.my_info.lon = self.org_lon
        self.my_info.height = self.org_height
        self.my_info.exec_()
        self.org_lat = self.my_info.lat
        self.org_lon = self.my_info.lon
        self.org_height = self.my_info.height

    def find_checked_radiobutton(self, radiobuttons):
        """Find the checked radio button."""
        for items in radiobuttons:
            if items.isChecked():
                checked_radiobutton = items.text()
                return checked_radiobutton

    def find_level_in_groupBox(self):
        """Return the index of the selected elevation level."""
        level = self.find_checked_radiobutton(self.groupBox.findChildren(QtWidgets.QRadioButton))
        levels = ["第1层", "第2层", "第3层", "第4层", "第5层", "第6层", "第7层", "第8层", "第9层"]
        for i in range(9):
            if level == levels[i]:
                return i

    def find_var_in_groupBox(self):
        """Return the index of the selected product variable."""
        var = self.find_checked_radiobutton(self.groupBox_2.findChildren(QtWidgets.QRadioButton))
        vars = ["反射率因子", "径向速度", "谱宽", "差分反射率", "差分相位比", "相关系数"]
        for i in range(6):
            if var == vars[i]:
                return i

    def plot_graph_PPI(self, radar, level, product, map, continuously):
        self.MplWidget.canvas.update()
        self.MplWidget.canvas.flush_events()
        try:
            self.fig.clf()
            self.ax.clear()
            self.cax.clear()
        except AttributeError:
            pass
        if not map:
            self.fig, self.ax, self.cax = self.MplWidget.canvas.get_fig_ax()
            self.ax.set_facecolor((0.95, 0.95, 0.95))
            self.pm = RadarGraph.GUI_plot(radar, self.fig, self.ax, self.cax, level,
                                          field_name[product], continuously=continuously)
        else:
            self.fig, self.ax, self.cax = self.MplWidget.canvas.get_fig_ax_map()
            self.ax.set_facecolor((0.95, 0.95, 0.95))
            self.pm = RadarGraphMap.GUI_plot(radar, self.fig, self.ax, self.cax, level,
                                             field_name[product], continuously=continuously)
        self.ax.tick_params(axis="y", which="both", direction='in')
        self.ax.tick_params(axis="x", which="both", direction='in')
        self.MplWidget.canvas.draw()
        if self.actionvertical.isChecked():
            # Try to re-bind the cross-section click handler to the new figure.
            try:
                self.fig.canvas.mpl_disconnect(self.linebuilder.cid)
                self.fig.canvas.mpl_disconnect(self.linebuilder.cursor)
                self.linebuilder = LineBuilder(self.fig, self.ax, self.radar_dat,
                                               self.find_var_in_groupBox(),
                                               self.actionwithmap.isChecked())
                self.clickevent = True
            except AttributeError:
                pass

    @pyqtSlot()
    def on_pushButton_clicked(self):
        """Step to the previous file in the list (wrapping around)."""
        if self.files is not None:
            items = self.listWidget.findItems(self.openbasename, Qt.MatchExactly)
            row = self.listWidget.row(items[0])
            nrows = len(self.files)
            res_row = row - 1
            if res_row < 0:
                res_row = nrows - 1
            self._load_and_show(self.lastOpenDir + os.sep + self.files[res_row])

    @pyqtSlot()
    def on_pushButton_2_clicked(self):
        """Animation playback: plot every file from the current one onward."""
        if self.files is not None:
            items = self.listWidget.findItems(self.openbasename, Qt.MatchExactly)
            row = self.listWidget.row(items[0])
            nrows = len(self.files)
            for irow in range(row, nrows):
                self._load_and_show(self.lastOpenDir + os.sep + self.files[irow])

    @pyqtSlot()
    def on_pushButton_3_clicked(self):
        """Step to the next file in the list (wrapping around)."""
        if self.files is not None:
            items = self.listWidget.findItems(self.openbasename, Qt.MatchExactly)
            row = self.listWidget.row(items[0])
            nrows = len(self.files)
            res_row = row + 1
            if res_row == nrows:
                res_row = 0
            self._load_and_show(self.lastOpenDir + os.sep + self.files[res_row])

    # The radio-button slots below are auto-connected by object name via
    # setupUi(), so each must keep its on_radioButton_N_clicked name;
    # they all simply redraw the current PPI.
    @pyqtSlot()
    def on_radioButton_15_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_12_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_14_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_10_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_13_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_11_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_2_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_4_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_5_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_3_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_1_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_7_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_8_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_6_clicked(self):
        self._replot_if_loaded()

    @pyqtSlot()
    def on_radioButton_9_clicked(self):
        self._replot_if_loaded()


if __name__ == "__main__":
    # Note: this module uses relative imports, so it must be launched as part
    # of its package (e.g. `python -m <package>.<module>`), not as a bare script.
    app = QtWidgets.QApplication(sys.argv)
    ui = MainWindow()
    ui.show()
    sys.exit(app.exec_())
# Authored by : gusdn3477
# Co-authored by : tony9402
# Link : http://boj.kr/8a53cdacfc6340c894fb47257232f244

import sys
from collections import deque


def input():
    return sys.stdin.readline().rstrip()


def checkMap():
    # True only if no unripe tomato (0) is left anywhere in the box.
    for z in range(H):
        for i in range(N):
            for j in range(M):
                if arr[z][i][j] == 0:
                    return False
    return True


def BFS():
    day = 0
    while queue:
        (z, x, y), day = queue.popleft()
        for i in range(6):
            dx = x + nx[i]
            dy = y + ny[i]
            dz = z + nz[i]
            if dx < 0 or dx >= N or dy < 0 or dy >= M or dz < 0 or dz >= H:
                continue
            if arr[dz][dx][dy] == 0:
                arr[dz][dx][dy] = 1
                queue.append(((dz, dx, dy), day + 1))
    if checkMap():
        return day
    return -1


M, N, H = map(int, input().split())
nx = [-1, 0, 1, 0, 0, 0]
ny = [0, -1, 0, 1, 0, 0]
nz = [0, 0, 0, 0, -1, 1]
queue = deque()

arr = [[list(map(int, input().split())) for _ in range(N)] for _ in range(H)]
for z in range(H):
    for i in range(N):
        for j in range(M):
            if arr[z][i][j] == 1:
                queue.append(((z, i, j), 0))

ans = BFS()
print(ans)
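# For a quick sanity check, a minimal standalone walk-through of the same
# six-neighbour flood fill on a hard-coded 1x1x3 box (illustration only,
# not part of the original submission): one ripe tomato on the left should
# ripen the other two cells in 2 days.
from collections import deque

grid = [[[1, 0, 0]]]          # grid[z][row][col]
H_, N_, M_ = 1, 1, 3
moves = [(0, 0, 1), (0, 0, -1), (0, 1, 0), (0, -1, 0), (1, 0, 0), (-1, 0, 0)]

q = deque(((z, i, j), 0) for z in range(H_) for i in range(N_)
          for j in range(M_) if grid[z][i][j] == 1)
days = 0
while q:
    (z, x, y), days = q.popleft()
    for dz, dx, dy in moves:
        zz, xx, yy = z + dz, x + dx, y + dy
        if 0 <= zz < H_ and 0 <= xx < N_ and 0 <= yy < M_ and grid[zz][xx][yy] == 0:
            grid[zz][xx][yy] = 1
            q.append(((zz, xx, yy), days + 1))

assert days == 2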
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typed_python.compiler.type_wrappers.wrapper import Wrapper
import typed_python.compiler.native_ast as native_ast
from typed_python import Int32


def tp_hash_to_py_hash(hVal):
    """Convert a typed-python hash to a regular python hash.

    Python insists that its hash values are never -1, because it uses -1 as an
    indicator that the exception flag is set. TypedPython doesn't have this
    behavior because it uses c++ exception propagation internally. As a result,
    it's the 'hash' wrapper that's responsible for mapping -1 to -2.
    """
    if hVal == -1:
        return Int32(-2)
    return hVal


class HashWrapper(Wrapper):
    is_pod = True
    is_empty = False
    is_pass_by_ref = False

    def __init__(self):
        super().__init__(hash)

    def getNativeLayoutType(self):
        return native_ast.Type.Void()

    def convert_call(self, context, expr, args, kwargs):
        if len(args) == 1 and not kwargs:
            hashVal = args[0].convert_hash()
            if hashVal is None:
                return None
            return context.call_py_function(tp_hash_to_py_hash, (hashVal,), {})
        return super().convert_call(context, expr, args, kwargs)
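# As a side note, plain CPython follows the same convention the wrapper
# reproduces: hash() never returns -1 at the C level, so hash(-1) is folded
# to -2 and collides with hash(-2) by design. A minimal sketch (illustration
# only, not part of the module above):
assert hash(-1) == -2
assert hash(-2) == -2


def py_style_hash(h):
    # Plain-int mirror of tp_hash_to_py_hash, for illustration.
    return -2 if h == -1 else h


assert py_style_hash(-1) == -2
assert py_style_hash(42) == 42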
"""Test Grapheme to Phoneme module"""<import_stmt>unittest<import_from_stmt>pororo Pororo<class_stmt>PororoPhonemeConversionTester(unittest.TestCase)<block_start><def_stmt>test_modules self<block_start>g2pk=Pororo(task="g2p" lang="ko")<line_sep>g2pk_res=g2pk("어제는 날씨가 맑았는데, 오늘은 흐리다.")<line_sep>self.assertIsInstance(g2pk_res str)<line_sep>g2pen=Pororo(task="g2p" lang="en")<line_sep>g2pen_res=g2pen("I have $250 in my pocket.")<line_sep>self.assertIsInstance(g2pen_res list)<line_sep>g2pzh=Pororo(task="g2p" lang="zh")<line_sep>g2pzh_res=g2pzh("然而,他红了20年以后,他竟退出了大家的视线。")<line_sep>self.assertIsInstance(g2pzh_res str)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator


class GuacdServer(models.Model):
    class Meta:
        verbose_name = "Guacd Server"
        verbose_name_plural = "Guacd Servers"

    name = models.CharField(max_length=64, blank=False, unique=True, default="guacd server")
    hostname = models.CharField(max_length=64, blank=False, default="localhost")
    port = models.PositiveIntegerField(blank=False, default=4822,
                                       validators=[MinValueValidator(1), MaxValueValidator(65535)])

    def __str__(self):
        return self.name
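# A minimal usage sketch from a Django shell; the import path below is an
# assumption (substitute the app that actually declares the model). Note that
# the MinValueValidator/MaxValueValidator pair only runs during model
# validation, e.g. full_clean(), not on a bare save().
from guacamole.models import GuacdServer  # hypothetical app path

srv = GuacdServer(name="lab guacd", hostname="10.0.0.5", port=4822)
srv.full_clean()   # triggers the port range validators (1..65535)
srv.save()
print(srv)         # -> "lab guacd", via __str__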
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-locals,too-many-statements,too-many-branches,protected-access
"""API for graph traversing."""
import threading
import re

import tvm
from tvm import relay, autotvm
from tvm.relay import transform
from tvm.relay.expr import Call, TupleGetItem, Var, Constant, Tuple
from tvm.relay.function import Function
from tvm.relay.ty import TupleType, TensorType
from tvm.autotvm.task import TaskExtractEnv

from .utils import has_multiple_inputs, is_boundary_node, is_skipped_node
from .._base import OPT_OUT_OP


def expr2graph(expr, target_ops, node_dict, node_list, tvm_target):
    """Convert relay expr to graph data structure
    and fetch workloads of target operators.

    Parameters
    ----------
    expr : tvm.relay.Expr.Function
        Input relay function expression.

    target_ops: List of tvm.ir.Op
        List of target relay ops

    node_dict : dictionary from tvm.relay.Expr to int
        Dictionary to record node index

    node_list : list of dictionary
        List of nodes which contains all expr in the input relay function.
        Each node will be stored as a dictionary in the format of
        {"op": str, "node": tvm.relay.expr, "inputs": [int],
         "types": [tvm.relay.Type], "name": str, "workloads": [tuple],
         "topi_op": [function]}

    tvm_target : tvm.target
        The TVM target object.
    """
    # TODO(@kevinthesun, @icemelon9): Currently graph tuning pass relies on the
    # fact that # autotvm tasks == # ops. But this won't be true after having
    # relay op strategy. We need to find a solution to fix this.
    env = TaskExtractEnv.get(allow_duplicate=True)
    env.reset(target_ops)
    # pylint: disable=not-context-manager
    with env:
        _expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target)
        task_pos = 0
        for node_entry in node_list:
            if node_entry["op"] in target_ops:
                task_name, args = env.task_collection[task_pos]
                task = autotvm.task.create(task_name, args, target=tvm_target)
                node_entry["workloads"] = [task.workload]
                node_entry["topi_op"] = [task_name]
                task_pos += 1


def _infer_type(node):
    """A method to infer the type of a relay expression."""
    mod = tvm.IRModule.from_expr(node)
    mod = transform.InferType()(mod)
    entry = mod["main"]
    return entry if isinstance(node, relay.Function) else entry.body


def _replace_device_with_tracing(target):
    """This is to replace -device=XXX with -device=tracing in the tvm_target
    string. It is a stand-alone function for testability.
    We need to have device=tracing in order to fetch the workloads; it is not
    used for anything beyond that, so it is safe to override the device here
    only."""
    target = str(target)
    if "-device" in target:
        return re.sub("-device=[^\\-$]+", "-device=tracing ", target).strip(" ")
    return target + " -device=tracing"


def _expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target):
    """Implementation to convert relay expr to graph data structure"""

    def _traverse_expr(node):
        if node in node_dict:
            return
        node_index = len(node_list)
        node_entry = {"node": node, "inputs": [], "types": [], "op": None, "name": None}

        if isinstance(node, Call):
            op = node.op
            node_entry["op"] = node.op
            for arg in node.args:
                in_node_idx = node_dict[arg]
                if isinstance(arg, (Tuple, TupleGetItem)):
                    node_entry["inputs"] += node_list[in_node_idx]["inputs"]
                else:
                    node_entry["inputs"].append([in_node_idx, 0, 0])
            infer_out = _infer_type(node)
            out_type = infer_out._checked_type_
            if isinstance(out_type, TensorType):
                node_entry["types"].append(out_type)
            elif isinstance(out_type, TupleType):
                for tupe_type in out_type.fields:
                    node_entry["types"].append(tupe_type)
            else:
                raise RuntimeError(
                    "Unsupported output type %s in operator %s" % (type(out_type), op.name)
                )

            # Utilize tracing target to fetch workload with topo-order.
            # Since we only need workload, dummy target can be used to
            # create task.
            if op in target_ops:
                params = []
                for i, input_idx in enumerate(node_entry["inputs"]):
                    input_node_entry = node_list[input_idx[0]]
                    input_type = input_node_entry["types"][input_idx[1]]
                    if not isinstance(input_node_entry["node"], (Var, Constant, Call)):
                        raise RuntimeError(
                            "Graph tuner can only tune target "
                            "operators with input node of type "
                            "relay.expr.Var/Constant/Call. Now "
                            "find a target op %s with input type %s"
                            % (op, str(type(input_node_entry["node"])))
                        )
                    free_var = relay.Var("var_%d" % i, input_type)
                    params.append(free_var)
                call = relay.Call(node.op, params, node.attrs)
                mod = tvm.IRModule.from_expr(relay.Function(params, call))
                relay.backend.te_compiler.get().clear()
                tracing_target = _replace_device_with_tracing(tvm_target)
                build_thread = threading.Thread(target=relay.build, args=(mod, tracing_target))
                build_thread.start()
                build_thread.join()
        elif isinstance(node, Var):
            node_entry["name"] = node.name_hint
            node_entry["types"] = [node.type_annotation]
        elif isinstance(node, Function):
            # Ignore root node since it equals to input function expression
            if node != expr:
                _expr2graph_impl(node, target_ops, node_dict, node_list, tvm_target)
            return
        elif isinstance(node, TupleGetItem):
            in_node_idx = node_dict[node.tuple_value]
            node_entry["inputs"].append([in_node_idx, node.index, 0])
        elif isinstance(node, Tuple):
            for tuple_item in node:
                in_node_idx = node_dict[tuple_item]
                if isinstance(tuple_item, TupleGetItem):
                    node_entry["inputs"] += node_list[in_node_idx]["inputs"]
                elif isinstance(tuple_item, Tuple):
                    raise RuntimeError("Graph tuner doesn't support nested tuple.")
                else:
                    node_entry["inputs"].append([in_node_idx, 0, 0])
        elif isinstance(node, Constant):
            node_entry["name"] = "Constant_" + str(node_index)
            node_entry["types"] = [node.checked_type]
        elif isinstance(node, tvm.ir.Op):
            return
        else:
            raise RuntimeError(
                "Not supported relay node type in graph tuning: %s" % str(type(node))
            )
        node_dict[node] = node_index
        node_list.append(node_entry)

    relay.analysis.post_order_visit(expr, _traverse_expr)


def get_direct_ancestor(node_list, visited_dict, target_ops, node_idx, input_names):
    """Given a node_list in relay function and a node index, return the
    closest ancestor which has op_name as operator name or is a multi-input
    operator.

    If the node has multiple inputs, multiple ancestor nodes will be returned.

    Parameters
    ----------
    node_list : list of dict of str to object
        List of all nodes in a graph.

    visited_dict : dict of int to int
        Nodes and corresponding ancestors which have been visited.

    target_ops: List of str
        List of target relay base op name

    node_idx : int
        Input node index.

    input_names : list of str
        Names of graph input nodes.

    Returns
    -------
    out : list of int
        List of ancestor node index.
    """
    if node_idx in visited_dict:
        return visited_dict[node_idx]
    node = node_list[node_idx]
    if is_boundary_node(node, input_names):
        return [node_idx]

    node_direct_ancestor = []
    for item_idx in node["inputs"]:
        item = node_list[item_idx[0]]
        is_multiple_inputs = has_multiple_inputs(node_list, item_idx[0], input_names, OPT_OUT_OP)
        if item["op"] in target_ops or is_multiple_inputs:
            node_direct_ancestor.append(item_idx[0])
        else:
            tmp = get_direct_ancestor(node_list, visited_dict, target_ops,
                                      item_idx[0], input_names)
            for tmp_item in tmp:
                if tmp_item not in node_direct_ancestor:
                    node_direct_ancestor.append(tmp_item)
    visited_dict[node_idx] = node_direct_ancestor
    return node_direct_ancestor


def get_in_nodes(node_list, target_ops, input_names):
    """Create a dictionary mapping from op_name nodes or multi-input
    nodes to closest input ancestors.

    Parameters
    ----------
    node_list : list of dict of str to object
        List of all nodes in a graph.

    target_ops: List of str
        List of target relay op

    input_names : list of str
        Names of graph input nodes.

    Returns
    -------
    out : dict of int to list of int
        Dictionary maps node index to closest input ancestors.
    """
    visited_dict = {}
    in_node_dict = {}
    for i, node in enumerate(node_list):
        if is_boundary_node(node, input_names) or is_skipped_node(node):
            continue
        get_direct_ancestor(node_list, visited_dict, target_ops, i, input_names)
    for key, val in visited_dict.items():
        node = node_list[key]
        is_multiple_inputs = has_multiple_inputs(node_list, key, input_names, OPT_OUT_OP)
        if node["op"] in target_ops or is_multiple_inputs:
            in_node_dict[key] = val

    # Reduce boundary nodes
    out_node_dict = get_out_nodes(in_node_dict)
    has_reduced_node = True
    while has_reduced_node:
        boundary_nodes = []
        for key, val in in_node_dict.items():
            node = node_list[key]
            is_boundary = True
            # Target ops can't be boundary nodes
            if node["op"] not in target_ops:
                for input_idx in val:
                    in_node = node_list[input_idx]
                    if not is_boundary_node(in_node, input_names) and input_idx in in_node_dict:
                        is_boundary = False
                    else:
                        val.remove(input_idx)
                if is_boundary:
                    boundary_nodes.append(key)
        if boundary_nodes:
            for idx in boundary_nodes:
                if idx in in_node_dict:
                    del in_node_dict[idx]
        else:
            has_reduced_node = False

    # Remove empty nodes to ignore pre-computed sub-graph
    has_empty_node = True
    while has_empty_node:
        empty_nodes = []
        for key, val in in_node_dict.items():
            if not val:
                empty_nodes.append(key)
        if empty_nodes:
            has_empty_node = True
            for node in empty_nodes:
                del in_node_dict[node]
                if node in out_node_dict:
                    for out_node in out_node_dict[node]:
                        in_node_dict[out_node].remove(node)
        else:
            has_empty_node = False

    return in_node_dict


def get_out_nodes(in_node_dict):
    """Create output dictionary from input dictionary.

    Parameters
    ----------
    in_node_dict : dict of int to list of int
        Dictionary maps node index to closest input ancestors.
        It can be created with get_in_nodes.

    Returns
    -------
    out : dict of int to list of int
        Dictionary maps node index to closest output nodes.
    """
    out_node_dict = {}
    for key in in_node_dict:
        out_node_dict[key] = []
    for key, val in in_node_dict.items():
        for item in val:
            if item in out_node_dict:
                out_node_dict[item].append(key)
            else:
                out_node_dict[item] = [key]
    return out_node_dict
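# To make the in/out relationship concrete, a tiny sketch with hypothetical
# node indices (not from a real graph): node 2 consumes nodes 0 and 1, and
# node 3 consumes node 2. get_out_nodes simply inverts that mapping.
in_nodes = {2: [0, 1], 3: [2]}
out_nodes = get_out_nodes(in_nodes)
assert out_nodes == {0: [2], 1: [2], 2: [3], 3: []}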
import re

import whois
from pythonping import ping


def whoami(target, post):
    # target = input("Enter the IP Address/Domain:")
    getweb = str(ping(target))
    ip = re.compile(
        r'(([2][5][0-5]\.)|([2][0-4][0-9]\.)|([0-1]?[0-9]?[0-9]\.)){3}'
        r'(([2][5][0-5])|([2][0-4][0-9])|([0-1]?[0-9]?[0-9]))')
    match = ip.search(getweb)
    if match:
        # target = match.group()
        w = whois.whois(target)
        print("Domain Name:" + str(w['domain_name']))
        print("Registrar:" + str(w['registrar']))
        try:
            print("Whois Server:" + str(w['whois_server']))
        except Exception as e:
            print(e)
        print("Server:" + str(w['name_servers']))
        print("Emails:" + str(w['emails']))
        try:
            print("Organisation:" + str(w['org']))
        except Exception as e:
            print("Organisation:" + str(w['organization']))
            print(e)
        try:
            print("Address:" + str(w['address']))
            print("City:" + str(w['city']))
            print("State:" + str(w['state']))
            print("Zipcode:" + str(w['zipcode']))
        except Exception as e:
            print(e)
        print("Country:" + str(w['country']))
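# A minimal invocation sketch, assuming the python-whois and pythonping
# packages are installed and the target answers ICMP (pythonping typically
# needs root/administrator privileges for raw sockets). The second argument
# is unused by the function, so None is fine.
if __name__ == "__main__":
    whoami("example.com", None)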
'''
HOW TO RUN THIS CODE (if tests are within the assignment 1 root):
    python -m py.test tests/test_sigmoid_to_solutions.py -vv -s -q
    python -m py.test tests/test_sigmoid_to_solutions.py -vv -s -q --cov
    py.test.exe --cov=cs224d/ tests/test_sigmoid_to_solutions.py --cov-report html

    (if the tests are within the subfolder tests)
    PYTHONPATH=${PWD} py.test.exe tests/ -v --cov-report html
    python -m pytest tests -v --cov-report html

Open index.html contained within htmlcov
'''

import pytest
import numpy as np

from q2_sigmoid import sigmoid, sigmoid_grad
from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol

COUNT = 5


def rel_error(x, y):
    """ returns relative error """
    return np.max(np.abs(x - y) / (np.maximum(1e-7, np.abs(x) + np.abs(y))))


@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid(sigmoid_f):
    """ Original sigmoid test defined in q2_sigmoid.py; """
    x = np.array([[1, 2], [-1, -2]])
    f = sigmoid_f(x)
    assert rel_error(f, np.array([[0.73105858, 0.88079708],
                                  [0.26894142, 0.11920292]])) <= 1e-7


@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoidgrad(sigmoid_f):
    """ Original sigmoid gradient test defined in q2_sigmoid.py; """
    x = np.array([[1, 2], [-1, -2]])
    f = sigmoid_f(x)
    g = sigmoid_grad(f)
    assert rel_error(g, np.array([[0.19661193, 0.10499359],
                                  [0.19661193, 0.10499359]])) <= 1e-7


@pytest.mark.parametrize("dim", list(range(1, 8)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_shape(dim, sigmoid_f):
    testing_shape = []
    for y in range(0, dim):
        testing_shape.append(np.random.randint(3, 8))
    shape = tuple(testing_shape)
    x = np.random.standard_normal(shape)
    y = np.copy(x)
    assert x.shape == sigmoid_f(y).shape
    assert x.shape == sigmoid_grad(sigmoid_f(y)).shape


@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_minus_z(sigmoid_f, count=100):
    # sigmoid(-z) == 1 - sigmoid(z)
    z = np.random.normal(loc=0., scale=100., size=count)
    y = -z
    assert rel_error(1 - sigmoid_f(y), sigmoid_f(z)) <= 1e-7


@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_monotone(sigmoid_f, count=100):
    z = np.random.normal(loc=0., scale=100., size=count)
    shift = np.random.uniform(low=0., high=10., size=count)
    assert np.all(sigmoid_f(z + shift) - sigmoid_f(z) >= 0)
    assert np.all(sigmoid_f(z - shift) - sigmoid_f(z) <= 0)


@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_range(sigmoid_f, count=100):
    z = np.random.normal(loc=0., scale=100., size=count)
    assert np.max(sigmoid_f(z)) <= 1.
    assert np.min(sigmoid_f(z)) >= 0.


@pytest.mark.parametrize("dim_1", list(range(1, 20)))
@pytest.mark.parametrize('execution_number', list(range(COUNT)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_permutation_axis0(dim_1, execution_number, sigmoid_f):
    """ sigmoid needs to be applied element-wise; """
    a1 = np.random.normal(size=(dim_1, 1))
    s1 = sigmoid_f(a1)
    permutation = np.random.permutation(dim_1)
    inverse_permutation = np.argsort(permutation)
    s1_perm = sigmoid_f(a1[permutation])
    assert rel_error(s1_perm[inverse_permutation], s1) <= 1e-8


@pytest.mark.parametrize("dim_1", list(range(1, 20)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_permutation_axis1(dim_1, sigmoid_f):
    # note: permutation(sigmoid(x)) == sigmoid(permutation(x))
    a1 = np.random.normal(size=(1, dim_1))
    s1 = sigmoid_f(a1)
    permutation = np.random.permutation(dim_1)
    inverse_permutation = np.argsort(permutation)
    s1_perm = sigmoid_f(a1.ravel()[permutation])
    assert rel_error(s1_perm.ravel()[inverse_permutation], s1) <= 1e-8


@pytest.mark.parametrize("dim_1", list(range(1, 20)))
@pytest.mark.parametrize("dim_2", list(range(1, 20)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_gradient(dim_1, dim_2, sigmoid_f):
    # central finite differences should agree with sigmoid_grad
    a1 = np.random.normal(loc=0., scale=20., size=(dim_1, dim_2))
    shift = np.random.uniform(low=1e-9, high=1e-5, size=(dim_1, dim_2))
    ap = a1 + shift
    am = a1 - shift
    dsigmoid = (sigmoid_f(ap) - sigmoid_f(am)) / (2 * shift)
    assert np.abs(np.max(dsigmoid - sigmoid_grad(sigmoid_f(a1)))) <= 1e-7
    assert np.abs(np.min(dsigmoid - sigmoid_grad(sigmoid_f(a1)))) <= 1e-7


@pytest.mark.parametrize("dim_1", list(range(1, 20)))
@pytest.mark.parametrize("dim_2", list(range(1, 20)))
def test_sigmoid_vs_solution(dim_1, dim_2):
    a1 = np.random.normal(loc=0., scale=20., size=(dim_1, dim_2))
    assert rel_error(sigmoid(a1), sigmoid_sol(a1)) <= 1e-10


@pytest.mark.parametrize("dim_1", list(range(1, 20)))
@pytest.mark.parametrize("dim_2", list(range(1, 20)))
def test_sigmoid_grad_vs_solution(dim_1, dim_2):
    a1 = np.random.normal(loc=0., scale=20., size=(dim_1, dim_2))
    a1_copy = a1.copy()
    s_a1 = sigmoid(a1)
    s_sol_a1 = sigmoid_sol(a1_copy)
    assert rel_error(sigmoid_grad(s_a1), sigmoid_grad_sol(s_sol_a1)) <= 1e-10


@pytest.mark.parametrize("dim_1", list(range(1, 20)))
@pytest.mark.parametrize("dim_2", list(range(1, 20)))
def test_sigmoid_grad_direct_vs_solution(dim_1, dim_2):
    a1 = np.random.normal(loc=0., scale=20., size=(dim_1, dim_2))
    a1_copy = a1.copy()
    assert rel_error(sigmoid_grad(a1), sigmoid_grad_sol(a1_copy)) <= 1e-10
import itertools
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from coremltools._deps import _HAS_KERAS2_TF
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models.utils import _macos_version, _is_macos

if _HAS_KERAS2_TF:
    import keras.backend
    from keras.models import Sequential, Model
    from keras.layers import (
        Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization,
        Conv2DTranspose, SeparableConv2D,
    )
    from keras.layers import (
        MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D,
    )
    from keras.layers import (
        MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D,
    )
    from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout
    from keras.layers import Add, Concatenate
    from keras.layers import add, multiply, concatenate, dot, maximum, average
    from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D
    from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D
    from keras.layers import SimpleRNN, LSTM, GRU
    from keras.layers.core import SpatialDropout2D
    from keras.layers.wrappers import Bidirectional, TimeDistributed
    from distutils.version import StrictVersion as _StrictVersion

    if keras.__version__ >= _StrictVersion("2.2.1"):
        from keras.layers import DepthwiseConv2D, ReLU
    elif keras.__version__ >= _StrictVersion("2.2.0"):
        from keras.layers import DepthwiseConv2D
        from keras_applications.mobilenet import relu6
    else:
        from keras.applications.mobilenet import DepthwiseConv2D, relu6


def _keras_transpose(x, is_sequence=False):
    if len(x.shape) == 5:
        # Keras input shape = [Batch, Seq, Height, Width, Channels]
        x = np.transpose(x, [1, 0, 4, 2, 3])
    if len(x.shape) == 4:
        # Keras input shape = [Batch, Height, Width, Channels]
        x = np.transpose(x, [0, 3, 1, 2])
        return np.expand_dims(x, axis=0)
    elif len(x.shape) == 3:
        # Keras input shape = [Batch, (Sequence) Length, Channels]
        return np.transpose(x, [1, 0, 2])
    elif len(x.shape) == 2:
        if is_sequence:  # (N,S) --> (S,N,1,)
            return x.reshape(x.shape[::-1] + (1,))
        else:  # (N,C) --> (N,C,1,1)
            return x.reshape((1,) + x.shape)  # Dense
    elif len(x.shape) == 1:
        if is_sequence:  # (S) --> (S,N,1,1,1)
            return x.reshape((x.shape[0], 1, 1))
        else:
            return x
    else:
        return x


def _get_coreml_model(
    model,
    input_names=["data"],
    output_names=["output"],
    input_name_shape_dict={},
    model_precision=_MLMODEL_FULL_PRECISION,
    use_float_arraytype=False,
):
    """
    Get the coreml model from the Keras model.
    """
    # Convert the model
    from coremltools.converters import keras as keras_converter

    model = keras_converter.convert(
        model,
        input_names,
        output_names,
        input_name_shape_dict=input_name_shape_dict,
        model_precision=model_precision,
        use_float_arraytype=use_float_arraytype,
    )
    return model


def _generate_data(input_shape, mode="random"):
    """
    Generate some random data according to a shape.
    """
    if mode == "zeros":
        X = np.zeros(input_shape)
    elif mode == "ones":
        X = np.ones(input_shape)
    elif mode == "linear":
        X = np.array(range(np.product(input_shape))).reshape(input_shape)
    elif mode == "random":
        X = np.random.rand(*input_shape)
    elif mode == "random_zero_mean":
        X = np.random.rand(*input_shape) - 0.5
    return X


@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasNumericCorrectnessTest(unittest.TestCase):
    """
    Unit test class for testing the Keras converter.
    """

    def runTest(self):
        pass

    def _get_coreml_model_params_and_test_input(
        self, model, mode, one_dim_seq_flags, input_name_shape_dict={}
    ):
        # Generate data
        nb_inputs = len(model.inputs)
        if nb_inputs > 1:
            input_names = []
            input_data = []
            coreml_input = {}
            for i in range(nb_inputs):
                feature_name = "data_%s" % i
                input_names.append(feature_name)
                if feature_name in input_name_shape_dict:
                    input_shape = [1 if a is None else a
                                   for a in input_name_shape_dict[feature_name]]
                else:
                    input_shape = [1 if a is None else a for a in model.input_shape[i]]
                X = _generate_data(input_shape, mode)
                input_data.append(X)
                if one_dim_seq_flags is None:
                    coreml_input[feature_name] = _keras_transpose(X).astype("f").copy()
                else:
                    coreml_input[feature_name] = (
                        _keras_transpose(X, one_dim_seq_flags[i]).astype("f").copy()
                    )
        else:
            input_names = ["data"]
            if "data" in input_name_shape_dict:
                input_shape = [1 if a is None else a
                               for a in input_name_shape_dict["data"]]
            else:
                input_shape = [1 if a is None else a for a in model.input_shape]
            input_data = _generate_data(input_shape, mode)
            if one_dim_seq_flags is None:
                coreml_input = {"data": _keras_transpose(input_data).astype("f").copy()}
            else:
                coreml_input = {
                    "data": _keras_transpose(input_data, one_dim_seq_flags[0]).astype("f").copy()
                }
        output_names = ["output" + str(i) for i in range(len(model.outputs))]
        return input_names, output_names, input_data, coreml_input

    def _test_model(
        self,
        model,
        input_name_shape_dict={},
        num_samples=1,
        mode="random",
        delta=1e-2,
        model_dir=None,
        transpose_keras_result=True,
        one_dim_seq_flags=None,
        model_precision=_MLMODEL_FULL_PRECISION,
    ):
        # transpose_keras_result: if true, compare the transposed Keras result
        # one_dim_seq_flags: a list of same length as the number of inputs in
        #   the model; if None, treat all 1D input (if any) as non-sequence;
        #   if one_dim_seq_flags[i] is True, it means the ith input, with shape
        #   (X,), is in fact a sequence of length X.

        # Get the CoreML model
        use_tmp_folder = False
        if model_dir is None:
            use_tmp_folder = True
            model_dir = tempfile.mkdtemp()
        (
            input_names,
            output_names,
            input_data,
            coreml_input,
        ) = self._get_coreml_model_params_and_test_input(
            model, mode, one_dim_seq_flags, input_name_shape_dict
        )
        coreml_model = _get_coreml_model(
            model,
            input_names,
            output_names,
            input_name_shape_dict,
            model_precision=model_precision,
        )

        try:
            if not (_is_macos() and _macos_version() >= (10, 13)):
                return

            # Assuming coreml model output names are in the same order as
            # the Keras output list, put predictions into a list, sorted by
            # output name
            coreml_preds = coreml_model.predict(coreml_input)
            c_preds = [coreml_preds[name] for name in output_names]

            # Get Keras predictions
            keras_preds = model.predict(input_data)
            k_preds = keras_preds if type(keras_preds) is list else [keras_preds]

            # Compare each output blob
            for idx, k_pred in enumerate(k_preds):
                if transpose_keras_result:
                    kp = _keras_transpose(k_pred).flatten()
                else:
                    kp = k_pred.flatten()
                cp = c_preds[idx].flatten()
                # Compare predictions
                self.assertEqual(len(kp), len(cp))
                for i in range(len(kp)):
                    max_den = max(1.0, kp[i], cp[i])
                    self.assertAlmostEqual(kp[i] / max_den, cp[i] / max_den, delta=delta)
        finally:
            # Cleanup files - models on disk no longer useful
            if use_tmp_folder and os.path.exists(model_dir):
                shutil.rmtree(model_dir)


@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasBasicNumericCorrectnessTest(KerasNumericCorrectnessTest):
    def test_tiny_inner_product(self, model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)

        # Define a model
        model = Sequential()
        model.add(Dense(2, input_shape=(2,)))

        # Test all zeros
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_model(model, mode="zeros", model_precision=model_precision)

        # Test all ones
        model.set_weights([np.ones(w.shape) for w in model.get_weights()])
        self._test_model(model, mode="ones", model_precision=model_precision)

        # Test random
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_model(model, model_precision=model_precision)

    def test_tiny_inner_product_half_precision(self):
        self.test_tiny_inner_product(model_precision=_MLMODEL_HALF_PRECISION)

    def test_inner_product_random(self, model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)

        # Define a model
        model = Sequential()
        model.add(Dense(1000, input_shape=(100,)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model, model_precision=model_precision)

    def test_inner_product_half_precision_random(self):
        self.test_inner_product_random(model_precision=_MLMODEL_HALF_PRECISION)

    def test_dense_softmax(self):
        np.random.seed(1988)

        # Define a model
        model = Sequential()
        model.add(Dense(32, input_shape=(32,), activation="softmax"))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model)

    def test_dense_elu(self):
        np.random.seed(1988)

        # Define a model
        model = Sequential()
        model.add(Dense(32, input_shape=(32,), activation="elu"))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model)

    def test_dense_selu(self):
        np.random.seed(1988)

        # Define a model
        model = Sequential()
        model.add(Dense(32, input_shape=(32,), activation="selu"))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model)

    def test_housenet_random(self):
        np.random.seed(1988)
        num_hidden = 2
        num_features = 3

        # Define a model
        model = Sequential()
        model.add(Dense(num_hidden, input_dim=num_features))
        model.add(Activation("relu"))
        model.add(Dense(1, input_dim=num_features))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model)

    def test_tiny_conv_ones(self, model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)
        input_dim = 10
        input_shape = (input_dim, input_dim, 1)
        num_kernels, kernel_height, kernel_width = 3, 5, 5

        # Define a model
        model = Sequential()
        model.add(
            Conv2D(
                input_shape=input_shape,
                filters=num_kernels,
                kernel_size=(kernel_height, kernel_width),
            )
        )

        # Set some random weights
        model.set_weights([np.ones(w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model, model_precision=model_precision)

    def test_tiny_conv_ones_half_precision(self):
        self.test_tiny_conv_ones(model_precision=_MLMODEL_HALF_PRECISION)

    def test_tiny_conv_random(self, model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)
        input_dim = 10
        input_shape = (input_dim, input_dim, 1)
        num_kernels, kernel_height, kernel_width = 3, 5, 5

        # Define a model
        model = Sequential()
        model.add(
            Conv2D(
                input_shape=input_shape,
                filters=num_kernels,
                kernel_size=(kernel_height, kernel_width),
            )
        )

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model, model_precision=model_precision)

    @unittest.skipUnless(
        _is_macos() and _macos_version() >= (10, 14), "Only supported on MacOS 10.14+"
    )
    def test_tiny_conv_random_input_shape_dict(
        self, model_precision=_MLMODEL_FULL_PRECISION
    ):
        np.random.seed(1988)
        H, W, C = 10, 20, 5
        input_shape = (None, H, W, C)
        num_kernels, kernel_height, kernel_width = 3, 5, 5

        # Define a model
        model = Sequential()
        model.add(
            Conv2D(
                input_shape=(None, None, C),
                filters=num_kernels,
                kernel_size=(kernel_height, kernel_width),
            )
        )

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(
            model,
            input_name_shape_dict={"data": input_shape},
            model_precision=model_precision,
        )

    def test_tiny_conv_random_half_precision(self):
        self.test_tiny_conv_random(model_precision=_MLMODEL_HALF_PRECISION)

    def test_tiny_conv_dilated(self, model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)
        input_dim = 10
        input_shape = (input_dim, input_dim, 1)
        num_kernels, kernel_height, kernel_width = 3, 5, 5

        # Define a model
        model = Sequential()
        model.add(
            Conv2D(
                input_shape=input_shape,
                dilation_rate=(2, 2),
                filters=num_kernels,
                kernel_size=(kernel_height, kernel_width),
            )
        )

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model, model_precision=model_precision)

    def test_tiny_conv_dilated_half_precision(self):
        return self.test_tiny_conv_dilated(model_precision=_MLMODEL_HALF_PRECISION)

    def test_tiny_conv_dilated_rect_random(self, model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)
        input_shape = (32, 20, 3)
        num_kernels = 2
        kernel_height = 3
        kernel_width = 3

        # Define a model
        model = Sequential()
        model.add(
            Conv2D(
                input_shape=input_shape,
                dilation_rate=(2, 2),
                filters=num_kernels,
                kernel_size=(kernel_height, kernel_width),
            )
        )

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model, model_precision=model_precision)

    def test_tiny_conv_dilated_rect_random_half_precision(self):
        return self.test_tiny_conv_dilated_rect_random(
            model_precision=_MLMODEL_HALF_PRECISION
        )

    def test_tiny_conv_pseudo_1d_x(self, model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)
        input_dim = 2
        input_length = 5
        filter_length = 1  # 3
        nb_filters = 1

        # Define a model
        model = Sequential()
        model.add(
            Conv2D(
                nb_filters,
                kernel_size=(1, filter_length),
                input_shape=(1, input_length, input_dim),
                padding="valid",
            )
        )

        # Set some random weights
        model.set_weights([np.ones(w.shape) for w in model.get_weights()])
        self._test_model(model, mode="linear", model_precision=model_precision)

    def test_tiny_conv_pseudo_1d_x_half_precision(self):
        return self.test_tiny_conv_pseudo_1d_x(model_precision=_MLMODEL_HALF_PRECISION)

    def test_tiny_conv1d_same_random(self):
        np.random.seed(1988)
        input_dim = 2
        input_length = 10
        filter_length = 3
        nb_filters = 4
        model = Sequential()
        model.add(
            Conv1D(
                nb_filters,
                kernel_size=filter_length,
                padding="same",
                input_shape=(input_length, input_dim),
            )
        )

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model)

    def test_tiny_conv1d_same_random_input_shape_dict(self):
        np.random.seed(1988)
        input_dim = 2
        input_length = 10
        filter_length = 3
        nb_filters = 4
        model = Sequential()
        model.add(
            Conv1D(
                nb_filters,
                kernel_size=filter_length,
                padding="same",
                input_shape=(None, input_dim),
            )
        )

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(
            model, input_name_shape_dict={"data": (None, input_length, input_dim)}
        )

    def test_large_input_length_conv1d_same_random(self
model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=2<line_sep>input_length=80<line_sep>filter_length=3<line_sep>nb_filters=4<line_sep>model=Sequential()<line_sep>model.add(Conv1D(nb_filters kernel_size=filter_length padding="same" input_shape=(input_length input_dim) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_large_input_length_conv1d_same_random_half_precision self<block_start><return>self.test_large_input_length_conv1d_same_random(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_conv1d_valid_random self<block_start>np.random.seed(1988)<line_sep>input_dim=2<line_sep>input_length=10<line_sep>filter_length=3<line_sep>nb_filters=4<line_sep>model=Sequential()<line_sep>model.add(Conv1D(nb_filters kernel_size=filter_length padding="valid" input_shape=(input_length input_dim) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_conv1d_dilated_random self<block_start>np.random.seed(1988)<line_sep>input_shape=(20 1)<line_sep>num_kernels=2<line_sep>filter_length=3<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv1D(num_kernels kernel_size=filter_length padding="valid" input_shape=input_shape dilation_rate=3 ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_conv_rect_kernel_x self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_shape=(input_dim input_dim 1)<line_sep>num_kernels=3<line_sep>kernel_height=1<line_sep>kernel_width=5<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv2D(input_shape=input_shape filters=num_kernels kernel_size=(kernel_height kernel_width) padding="same" ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_conv_rect_kernel_y self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_shape=(input_dim input_dim 1)<line_sep>num_kernels=3<line_sep>kernel_height=5<line_sep>kernel_width=1<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv2D(input_shape=input_shape filters=num_kernels kernel_size=(kernel_height kernel_width) padding="valid" ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_conv_rect_kernel_xy self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_shape=(input_dim input_dim 1)<line_sep>num_kernels=3<line_sep>kernel_height=5<line_sep>kernel_width=3<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv2D(input_shape=input_shape filters=num_kernels kernel_size=(kernel_height kernel_width) padding="valid" ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_tiny_conv_rect_kernel_xy_half_precision 
self<block_start>self.test_tiny_conv_rect_kernel_xy(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_flatten self<block_start>model=Sequential()<line_sep>model.add(Flatten(input_shape=(2 2 2)))<line_sep>self._test_model(model mode="linear")<block_end><def_stmt>test_conv_dense self model_precision=_MLMODEL_FULL_PRECISION<block_start>input_shape=(48 48 3)<line_sep>model=Sequential()<line_sep>model.add(Conv2D(32 (3 3) activation="relu" input_shape=input_shape))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(10 activation="softmax"))<line_sep># Get the coreml model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_conv_dense_half_precision self<block_start><return>self.test_conv_dense(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_conv_batchnorm_random self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_shape=(input_dim input_dim 3)<line_sep>num_kernels=3<line_sep>kernel_height=5<line_sep>kernel_width=5<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv2D(input_shape=input_shape filters=num_kernels kernel_size=(kernel_height kernel_width) ))<line_sep>model.add(BatchNormalization(epsilon=1e-5))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_conv_batchnorm_random_half_precision self<block_start><return>self.test_conv_batchnorm_random(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_conv_batchnorm_no_gamma_no_beta self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_shape=(input_dim input_dim 3)<line_sep>num_kernels=3<line_sep>kernel_height=5<line_sep>kernel_width=5<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv2D(input_shape=input_shape filters=num_kernels kernel_size=(kernel_height kernel_width) ))<line_sep>model.add(BatchNormalization(center=<false> scale=<false> epsilon=1e-5))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_conv_batchnorm_no_gamma_no_beta_half_precision self<block_start><return>self.test_conv_batchnorm_no_gamma_no_beta(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_deconv_random self# In Keras 2, deconvolution auto computes the output shape. 
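# The standard transposed-convolution shape rule for "valid" padding is out = (in - 1) * stride + kernel, so the 13x13 input below with a 3x3 kernel at stride 1 comes out 15x15; for "same" padding (next test) out = in * stride, e.g. 14 -> 28 at stride 2.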
<block_start>np.random.seed(1988)<line_sep>input_dim=13<line_sep>input_shape=(input_dim input_dim 5)<line_sep>num_kernels=16<line_sep>kernel_height=3<line_sep>kernel_width=3<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv2DTranspose(filters=num_kernels kernel_size=(kernel_height kernel_width) input_shape=input_shape padding="valid" use_bias=<false> ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_deconv_random_same_padding self<block_start>np.random.seed(1988)<line_sep>input_dim=14<line_sep>input_shape=(input_dim input_dim 3)<line_sep>num_kernels=16<line_sep>kernel_height=3<line_sep>kernel_width=3<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv2DTranspose(filters=num_kernels kernel_size=(kernel_height kernel_width) input_shape=input_shape padding="same" strides=(2 2) use_bias=<true> ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_depthwise_conv_same_pad self<block_start>np.random.seed(1988)<line_sep>input_dim=16<line_sep>input_shape=(input_dim input_dim 3)<line_sep>depth_multiplier=1<line_sep>kernel_height=3<line_sep>kernel_width=3<line_sep># Define a model model=Sequential()<line_sep>model.add(DepthwiseConv2D(depth_multiplier=depth_multiplier kernel_size=(kernel_height kernel_width) input_shape=input_shape padding="same" strides=(1 1) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_depthwise_conv_valid_pad self<block_start>np.random.seed(1988)<line_sep>input_dim=16<line_sep>input_shape=(input_dim input_dim 3)<line_sep>depth_multiplier=1<line_sep>kernel_height=3<line_sep>kernel_width=3<line_sep># Define a model model=Sequential()<line_sep>model.add(DepthwiseConv2D(depth_multiplier=depth_multiplier kernel_size=(kernel_height kernel_width) input_shape=input_shape padding="valid" strides=(1 1) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_depthwise_conv_same_pad_depth_multiplier self<block_start>np.random.seed(1988)<line_sep>input_dim=16<line_sep>input_shape=(input_dim input_dim 3)<line_sep>depth_multiplier=4<line_sep>kernel_height=3<line_sep>kernel_width=3<line_sep># Define a model model=Sequential()<line_sep>model.add(DepthwiseConv2D(depth_multiplier=depth_multiplier kernel_size=(kernel_height kernel_width) input_shape=input_shape padding="same" strides=(1 1) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_depthwise_conv_valid_pad_depth_multiplier self<block_start>np.random.seed(1988)<line_sep>input_dim=16<line_sep>input_shape=(input_dim input_dim 3)<line_sep>depth_multiplier=2<line_sep>kernel_height=3<line_sep>kernel_width=3<line_sep># Define a model model=Sequential()<line_sep>model.add(DepthwiseConv2D(depth_multiplier=depth_multiplier kernel_size=(kernel_height kernel_width) input_shape=input_shape padding="valid" strides=(1 1) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w 
model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_separable_conv_valid self<block_start>np.random.seed(1988)<line_sep>input_dim=16<line_sep>input_shape=(input_dim input_dim 3)<line_sep>depth_multiplier=1<line_sep>kernel_height=3<line_sep>kernel_width=3<line_sep>num_kernels=4<line_sep># Define a model model=Sequential()<line_sep>model.add(SeparableConv2D(filters=num_kernels kernel_size=(kernel_height kernel_width) padding="valid" strides=(1 1) depth_multiplier=depth_multiplier input_shape=input_shape ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_separable_conv_same_fancy self<block_start>np.random.seed(1988)<line_sep>input_dim=16<line_sep>input_shape=(input_dim input_dim 3)<line_sep>depth_multiplier=1<line_sep>kernel_height=3<line_sep>kernel_width=3<line_sep>num_kernels=4<line_sep># Define a model model=Sequential()<line_sep>model.add(SeparableConv2D(filters=num_kernels kernel_size=(kernel_height kernel_width) padding="same" strides=(2 2) activation="relu" depth_multiplier=depth_multiplier input_shape=input_shape ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_separable_conv_valid_depth_multiplier self<block_start>np.random.seed(1988)<line_sep>input_dim=16<line_sep>input_shape=(input_dim input_dim 3)<line_sep>depth_multiplier=5<line_sep>kernel_height=3<line_sep>kernel_width=3<line_sep>num_kernels=40<line_sep># Define a model model=Sequential()<line_sep>model.add(SeparableConv2D(filters=num_kernels kernel_size=(kernel_height kernel_width) padding="valid" strides=(1 1) depth_multiplier=depth_multiplier input_shape=input_shape ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_separable_conv_same_fancy_depth_multiplier self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=16<line_sep>input_shape=(input_dim input_dim 3)<line_sep>depth_multiplier=2<line_sep>kernel_height=3<line_sep>kernel_width=3<line_sep>num_kernels=40<line_sep># Define a model model=Sequential()<line_sep>model.add(SeparableConv2D(filters=num_kernels kernel_size=(kernel_height kernel_width) padding="same" strides=(2 2) activation="relu" depth_multiplier=depth_multiplier input_shape=input_shape ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_tiny_separable_conv_same_fancy_depth_multiplier_half_precision self<block_start><return>self.test_tiny_separable_conv_same_fancy_depth_multiplier(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_separable_conv_dilated self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_shape=(input_dim input_dim 1)<line_sep>num_kernels,kernel_height,kernel_width=3 5 5<line_sep># Define a model model=Sequential()<line_sep>model.add(SeparableConv2D(input_shape=input_shape dilation_rate=(2 2) filters=num_kernels kernel_size=(kernel_height kernel_width) ))<line_sep># Set some random weights 
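# (Note: dilation_rate=(2, 2) spreads the 5x5 taps over an effective 9x9 window, k_eff = k + (k - 1) * (d - 1), so valid padding shrinks the 10x10 input here to a 2x2 output.)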
model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_tiny_separable_conv_dilated_half_precision self<block_start><return>self.test_tiny_separable_conv_dilated(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_separable_conv_dilated_rect_random self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_shape=(32 20 3)<line_sep>num_kernels=2<line_sep>kernel_height=3<line_sep>kernel_width=3<line_sep># Define a model model=Sequential()<line_sep>model.add(SeparableConv2D(input_shape=input_shape dilation_rate=(2 2) filters=num_kernels kernel_size=(kernel_height kernel_width) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_tiny_separable_conv_dilated_rect_random_half_precision self<block_start><return>self.test_tiny_separable_conv_dilated_rect_random(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_max_pooling_no_overlap self# no_overlap: pool_size = strides <block_start>model=Sequential()<line_sep>model.add(MaxPooling2D(input_shape=(16 16 3) pool_size=(2 2) strides=<none> padding="valid"))<line_sep>self._test_model(model)<block_end><def_stmt>test_max_pooling_overlap_multiple self# input shape is multiple of pool_size, strides != pool_size <block_start>model=Sequential()<line_sep>model.add(MaxPooling2D(input_shape=(18 18 3) pool_size=(3 3) strides=(2 2) padding="valid" ))<line_sep>self._test_model(model)<block_end><def_stmt>test_max_pooling_overlap_odd self<block_start>model=Sequential()<line_sep>model.add(MaxPooling2D(input_shape=(16 16 3) pool_size=(3 3) strides=(2 2) padding="valid" ))<line_sep>self._test_model(model)<block_end><def_stmt>test_max_pooling_overlap_same self<block_start>model=Sequential()<line_sep>model.add(MaxPooling2D(input_shape=(16 16 3) pool_size=(3 3) strides=(2 2) padding="same" ))<line_sep>self._test_model(model)<block_end><def_stmt>test_global_max_pooling self<block_start>model=Sequential()<line_sep>model.add(GlobalMaxPooling2D(input_shape=(16 16 3)))<line_sep>self._test_model(model)<block_end><def_stmt>test_average_pooling_no_overlap self# no_overlap: pool_size = strides <block_start>model=Sequential()<line_sep>model.add(AveragePooling2D(input_shape=(16 16 3) pool_size=(2 2) strides=<none> padding="valid"))<line_sep>self._test_model(model delta=1e-2)<block_end><def_stmt>test_average_pooling_inception_config_1 self# no_overlap: pool_size = strides <block_start>model=Sequential()<line_sep>model.add(AveragePooling2D(input_shape=(16 16 3) pool_size=(3 3) strides=(1 1) padding="same" ))<line_sep>self._test_model(model delta=1e-2)<block_end><def_stmt>test_global_average_pooling self<block_start>model=Sequential()<line_sep>model.add(GlobalAveragePooling2D(input_shape=(16 16 3)))<line_sep>self._test_model(model)<block_end><def_stmt>test_max_pooling_1d self<block_start>model=Sequential()<line_sep>model.add(MaxPooling1D(input_shape=(16 3) pool_size=4))<line_sep>self._test_model(model)<block_end><def_stmt>test_global_max_pooling_1d self<block_start>np.random.seed(1988)<line_sep>input_dim=2<line_sep>input_length=10<line_sep>filter_length=3<line_sep>nb_filters=4<line_sep>model=Sequential()<line_sep>model.add(Conv1D(nb_filters kernel_size=filter_length padding="same" input_shape=(input_length input_dim) 
))<line_sep>model.add(GlobalMaxPooling1D())<line_sep>self._test_model(model)<block_end><def_stmt>test_average_pooling_1d self<block_start>np.random.seed(1988)<line_sep>input_dim=2<line_sep>input_length=10<line_sep>filter_length=3<line_sep>nb_filters=4<line_sep>model=Sequential()<line_sep>model.add(Conv1D(nb_filters kernel_size=filter_length padding="same" input_shape=(input_length input_dim) ))<line_sep>model.add(AveragePooling1D(pool_size=2))<line_sep>self._test_model(model)<block_end><def_stmt>test_global_average_pooling_1d self<block_start>np.random.seed(1988)<line_sep>input_dim=2<line_sep>input_length=10<line_sep>filter_length=3<line_sep>nb_filters=4<line_sep>model=Sequential()<line_sep>model.add(Conv1D(nb_filters kernel_size=filter_length padding="same" input_shape=(input_length input_dim) ))<line_sep>model.add(GlobalAveragePooling1D())<line_sep>self._test_model(model)<block_end><def_stmt>test_tiny_conv_upsample_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_shape=(input_dim input_dim 1)<line_sep>num_kernels=3<line_sep>kernel_height=5<line_sep>kernel_width=5<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv2D(input_shape=input_shape filters=num_kernels kernel_size=(kernel_height kernel_width) ))<line_sep>model.add(UpSampling2D(size=2))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_conv_upsample_1d_random self<block_start>np.random.seed(1988)<line_sep>input_dim=2<line_sep>input_length=10<line_sep>filter_length=3<line_sep>nb_filters=4<line_sep>model=Sequential()<line_sep>model.add(Conv1D(nb_filters kernel_size=filter_length padding="same" input_shape=(input_length input_dim) ))<line_sep>model.add(UpSampling1D(size=2))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_conv_crop_1d_random self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=2<line_sep>input_length=10<line_sep>filter_length=3<line_sep>nb_filters=4<line_sep>model=Sequential()<line_sep>model.add(Conv1D(nb_filters kernel_size=filter_length padding="same" input_shape=(input_length input_dim) ))<line_sep>model.add(Cropping1D(cropping=2))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_tiny_conv_crop_1d_random_half_precision self<block_start><return>self.test_tiny_conv_crop_1d_random(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_conv_pad_1d_random self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=2<line_sep>input_length=10<line_sep>filter_length=3<line_sep>nb_filters=4<line_sep>model=Sequential()<line_sep>model.add(Conv1D(nb_filters kernel_size=filter_length padding="same" input_shape=(input_length input_dim) ))<line_sep>model.add(ZeroPadding1D(padding=2))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_tiny_conv_pad_1d_random_half_precision 
self<block_start><return>self.test_tiny_conv_pad_1d_random(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_conv_causal_1d self<block_start>np.random.seed(1988)<line_sep>model=Sequential()<line_sep>model.add(Conv1D(1 3 input_shape=(10 1) use_bias=<false> padding="causal"))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model)<block_end><def_stmt>test_embedding self model_precision=_MLMODEL_FULL_PRECISION<block_start>model=Sequential()<line_sep>num_inputs=10<line_sep>num_outputs=3<line_sep>model.add(Embedding(num_inputs num_outputs))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_embedding_half_precision self<block_start><return>self.test_embedding(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_embedding_seq self model_precision=_MLMODEL_FULL_PRECISION<block_start>model=Sequential()<line_sep>num_inputs=10<line_sep>num_outputs=3<line_sep>model.add(Embedding(num_inputs num_outputs input_length=7))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model one_dim_seq_flags=[<true>] model_precision=model_precision)<block_end><def_stmt>test_embedding_seq_half_precision self<block_start><return>self.test_embedding_seq(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_no_sequence_simple_rnn_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_length=1<line_sep>num_channels=1<line_sep># Define a model model=Sequential()<line_sep>model.add(SimpleRNN(num_channels input_shape=(input_length input_dim)))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_sequence_simple_rnn_random self<block_start>np.random.seed(1988)<line_sep>input_dim=2<line_sep>input_length=4<line_sep>num_channels=3<line_sep># Define a model model=Sequential()<line_sep>model.add(SimpleRNN(num_channels input_shape=(input_length input_dim)))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_seq2seq_rnn_random self<block_start>np.random.seed(1988)<line_sep>input_dim=2<line_sep>input_length=4<line_sep>num_channels=3<line_sep># Define a model model=Sequential()<line_sep>model.add(SimpleRNN(num_channels input_shape=(input_length input_dim) return_sequences=<true> ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_rnn_seq self<block_start>np.random.seed(1988)<line_sep>input_dim=11<line_sep>input_length=5<line_sep># Define a model model=Sequential()<line_sep>model.add(SimpleRNN(20 input_shape=(input_length input_dim) return_sequences=<false>))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_rnn_seq_backwards self<block_start>np.random.seed(1988)<line_sep>input_dim=11<line_sep>input_length=5<line_sep># Define a model model=Sequential()<line_sep>model.add(SimpleRNN(20 input_shape=(input_length input_dim) 
return_sequences=<false> go_backwards=<true> ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_medium_no_sequence_simple_rnn_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_length=1<line_sep>num_channels=10<line_sep># Define a model model=Sequential()<line_sep>model.add(SimpleRNN(num_channels input_shape=(input_length input_dim)))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_no_sequence_lstm_zeros self<block_start>np.random.seed(1988)<line_sep>input_dim=1<line_sep>input_length=1<line_sep>num_channels=1<line_sep>model=Sequential()<line_sep>model.add(LSTM(num_channels input_shape=(input_length input_dim) implementation=1 recurrent_activation="sigmoid" ))<line_sep>model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep>self._test_model(model mode="zeros")<block_end><def_stmt>test_tiny_no_sequence_lstm_ones self<block_start>np.random.seed(1988)<line_sep>input_dim=1<line_sep>input_length=1<line_sep>num_channels=1<line_sep>model=Sequential()<line_sep>model.add(LSTM(num_channels input_shape=(input_length input_dim) implementation=1 recurrent_activation="sigmoid" ))<line_sep>model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep>self._test_model(model mode="ones")<block_end><def_stmt>test_small_no_sequence_lstm_zeros self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_length=1<line_sep>num_channels=1<line_sep>model=Sequential()<line_sep>model.add(LSTM(num_channels input_shape=(input_length input_dim) implementation=2 recurrent_activation="sigmoid" ))<line_sep>model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep>self._test_model(model mode="zeros")<block_end><def_stmt>test_small_no_sequence_lstm_ones self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_length=1<line_sep>num_channels=1<line_sep>model=Sequential()<line_sep>model.add(LSTM(num_channels input_shape=(input_length input_dim) implementation=2 recurrent_activation="sigmoid" ))<line_sep>model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep>self._test_model(model mode="ones")<block_end><def_stmt>test_lstm_seq self<block_start>np.random.seed(1988)<line_sep>input_dim=11<line_sep>input_length=5<line_sep>model=Sequential()<line_sep>model.add(LSTM(20 input_shape=(input_length input_dim) return_sequences=<false>))<line_sep>model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep>self._test_model(model)<block_end><def_stmt>test_lstm_seq_backwards self<block_start>np.random.seed(1988)<line_sep>input_dim=11<line_sep>input_length=5<line_sep>model=Sequential()<line_sep>model.add(LSTM(20 input_shape=(input_length input_dim) return_sequences=<false> go_backwards=<true> ))<line_sep>model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep>self._test_model(model)<block_end><def_stmt>test_medium_no_sequence_lstm_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_length=1<line_sep>num_channels=10<line_sep># Define a model model=Sequential()<line_sep>model.add(LSTM(num_channels input_shape=(input_length 
input_dim) recurrent_activation="sigmoid" ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_no_sequence_lstm_zeros_gpu self<block_start>np.random.seed(1988)<line_sep>input_dim=1<line_sep>input_length=1<line_sep>num_channels=1<line_sep># Define a model model=Sequential()<line_sep>model.add(LSTM(num_channels input_shape=(input_length input_dim) implementation=2 recurrent_activation="sigmoid" ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model mode="zeros")<block_end><def_stmt>test_small_no_sequence_lstm_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_length=1<line_sep>num_channels=1<line_sep># Define a model model=Sequential()<line_sep>model.add(LSTM(num_channels input_shape=(input_length input_dim) implementation=2 recurrent_activation="sigmoid" ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_tiny_no_sequence_gru_random self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=1<line_sep>input_length=1<line_sep>num_channels=1<line_sep>num_samples=1<line_sep># Define a model model=Sequential()<line_sep>model.add(GRU(num_channels input_shape=(input_length input_dim) recurrent_activation="sigmoid" ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_tiny_no_sequence_gru_random_half_precision self<block_start><return>self.test_tiny_no_sequence_gru_random(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_small_no_sequence_gru_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_length=1<line_sep>num_channels=1<line_sep># Define a model model=Sequential()<line_sep>model.add(GRU(num_channels input_shape=(input_length input_dim) recurrent_activation="sigmoid" ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_medium_no_sequence_gru_random self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_length=1<line_sep>num_channels=10<line_sep># Define a model model=Sequential()<line_sep>model.add(GRU(num_channels input_shape=(input_length input_dim) recurrent_activation="sigmoid" ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_medium_no_sequence_gru_random_half_precision self<block_start><return>self.test_medium_no_sequence_gru_random(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_gru_seq self<block_start>np.random.seed(1988)<line_sep>input_dim=11<line_sep>input_length=5<line_sep># Define a model model=Sequential()<line_sep>model.add(GRU(20 input_shape=(input_length input_dim) return_sequences=<false>))<line_sep># Set some random weights 
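# (The uniform draw below, rand() * 0.2 - 0.1, yields weights in [-0.1, 0.1); presumably this keeps recurrent pre-activations small so the saturating tanh/sigmoid units agree between Keras and Core ML within the test delta.)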
model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_gru_seq_backwards self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=11<line_sep>input_length=5<line_sep># Define a model model=Sequential()<line_sep>model.add(GRU(20 input_shape=(input_length input_dim) return_sequences=<false> go_backwards=<true> ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_gru_seq_backwards_half_precision self<block_start><return>self.test_gru_seq_backwards(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_no_sequence_bidir_random self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=1<line_sep>input_length=1<line_sep>num_channels=1<line_sep>num_samples=1<line_sep># Define a model model=Sequential()<line_sep>model.add(Bidirectional(LSTM(num_channels implementation=1 recurrent_activation="sigmoid") input_shape=(input_length input_dim) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_tiny_no_sequence_bidir_random_half_precision self<block_start><return>self.test_tiny_no_sequence_bidir_random(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_no_sequence_bidir_random_gpu self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=1<line_sep>input_length=1<line_sep>num_channels=1<line_sep>num_samples=1<line_sep># Define a model model=Sequential()<line_sep>model.add(Bidirectional(LSTM(num_channels implementation=2 recurrent_activation="sigmoid") input_shape=(input_length input_dim) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_tiny_no_sequence_bidir_random_gpu_half_precision self<block_start><return>self.test_tiny_no_sequence_bidir_random_gpu(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_small_no_sequence_bidir_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_length=1<line_sep>num_channels=1<line_sep># Define a model model=Sequential()<line_sep>model.add(Bidirectional(LSTM(num_channels implementation=2 recurrent_activation="sigmoid") input_shape=(input_length input_dim) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_medium_no_sequence_bidir_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_length=1<line_sep>num_channels=10<line_sep># Define a model model=Sequential()<line_sep>model.add(Bidirectional(LSTM(num_channels implementation=2 recurrent_activation="sigmoid") input_shape=(input_length input_dim) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_medium_bidir_random_return_seq_false 
self<block_start>np.random.seed(1988)<line_sep>input_dim=7<line_sep>input_length=5<line_sep>num_channels=10<line_sep># Define a model model=Sequential()<line_sep>model.add(Bidirectional(LSTM(num_channels return_sequences=<false> implementation=2 recurrent_activation="sigmoid" ) input_shape=(input_length input_dim) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_medium_bidir_random_return_seq_true self<block_start>np.random.seed(1988)<line_sep>input_dim=7<line_sep>input_length=5<line_sep>num_channels=10<line_sep># Define a model model=Sequential()<line_sep>model.add(Bidirectional(LSTM(num_channels return_sequences=<true> implementation=2 recurrent_activation="sigmoid" ) input_shape=(input_length input_dim) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end><def_stmt>test_bilstm_merge_modes self# issue 157 <block_start><def_stmt>get_model input_dim fc_size rnn_size output_dim merge_mode<block_start>input_data=Input(name="the_input" shape=(<none> input_dim))<line_sep>x=TimeDistributed(Dense(fc_size name="fc1" activation="relu" ))(input_data)<line_sep>x=Bidirectional(LSTM(rnn_size return_sequences=<true> activation="relu" kernel_initializer="he_normal" ) merge_mode=merge_mode )(x)<line_sep>y_pred=TimeDistributed(Dense(output_dim name="y_pred" activation="softmax"))(x)<line_sep>model=Model([input_data] [y_pred])<line_sep>model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep><return>model<block_end>input_dim=26<line_sep>fc_size=512<line_sep>rnn_size=512<line_sep>output_dim=29<for_stmt>merge_mode ["concat" "sum" "mul" "ave"]<block_start>model=get_model(input_dim fc_size rnn_size output_dim merge_mode)<line_sep>self._test_model(model)<block_end><block_end><def_stmt>test_tiny_conv_elu_random self<block_start>np.random.seed(1988)<line_sep># Define a model <import_from_stmt>keras.layers.advanced_activations ELU<line_sep>model=Sequential()<line_sep>model.add(Conv2D(input_shape=(10 10 3) filters=3 kernel_size=(5 5)))<line_sep>model.add(ELU(alpha=0.8))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_tiny_conv_prelu_random self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep># Define a model <import_from_stmt>keras.layers.advanced_activations PReLU<line_sep>model=Sequential()<line_sep>model.add(Conv2D(input_shape=(10 10 3) filters=3 kernel_size=(5 5) padding="same"))<line_sep>model.add(PReLU(shared_axes=[1 2]))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_tiny_conv_prelu_random_half_precision self<block_start><return>self.test_tiny_conv_prelu_random(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_conv_leaky_relu_random self<block_start>np.random.seed(1988)<line_sep># Define a model <import_from_stmt>keras.layers.advanced_activations LeakyReLU<line_sep>model=Sequential()<line_sep>model.add(Conv2D(input_shape=(10 10 3) filters=3 kernel_size=(5 5) padding="same"))<line_sep>model.add(LeakyReLU(alpha=0.3))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w 
model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_tiny_conv_thresholded_relu_random self<block_start>np.random.seed(1988)<line_sep># Define a model <import_from_stmt>keras.layers.advanced_activations ThresholdedReLU<line_sep>model=Sequential()<line_sep>model.add(Conv2D(input_shape=(10 10 3) filters=3 kernel_size=(5 5) padding="same"))<line_sep>model.add(ThresholdedReLU(theta=0.8))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_tiny_concat_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>num_channels=6<line_sep># Define a model input_tensor=Input(shape=(input_dim ))<line_sep>x1=Dense(num_channels)(input_tensor)<line_sep>x2=Dense(num_channels)(x1)<line_sep>x3=Dense(num_channels)(x1)<line_sep>x4=concatenate([x2 x3])<line_sep>x5=Dense(num_channels)(x4)<line_sep>model=Model(inputs=[input_tensor] outputs=[x5])<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_tiny_concat_seq_random self<block_start>np.random.seed(1988)<line_sep>max_features=10<line_sep>embedding_dims=4<line_sep>seq_len=5<line_sep>num_channels=6<line_sep># Define a model input_tensor=Input(shape=(seq_len ))<line_sep>x1=Embedding(max_features embedding_dims)(input_tensor)<line_sep>x2=Embedding(max_features embedding_dims)(input_tensor)<line_sep>x3=concatenate([x1 x2] axis=1)<line_sep>model=Model(inputs=[input_tensor] outputs=[x3])<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model one_dim_seq_flags=[<true>])<block_end><def_stmt>test_lstm_concat_dense_random self<block_start>np.random.seed(1988)<line_sep>vocab_size=1250<line_sep>seq_length=5<line_sep>units=32<line_sep># Define a model input=Input(shape=(seq_length ))<line_sep>pos=Input(shape=(seq_length 1))<line_sep>embedding=Embedding(vocab_size 50 input_length=seq_length)(input)<line_sep>concat=Concatenate(axis=2)([embedding pos])<line_sep>model=LSTM(units return_sequences=<true> stateful=<false>)(concat)<line_sep>model=LSTM(units return_sequences=<false>)(model)<line_sep>model=Dense(100 activation="relu")(model)<line_sep>model=Dense(vocab_size activation="softmax")(model)<line_sep>model=Model(inputs=[input pos] outputs=model)<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model one_dim_seq_flags=[<true> <true>])<block_end><def_stmt>test_tiny_add_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>num_channels=6<line_sep># Define a model input_tensor=Input(shape=(input_dim ))<line_sep>x1=Dense(num_channels)(input_tensor)<line_sep>x2=Dense(num_channels)(x1)<line_sep>x3=Dense(num_channels)(x1)<line_sep>x4=add([x2 x3])<line_sep>x5=Dense(num_channels)(x4)<line_sep>model=Model(inputs=[input_tensor] outputs=[x5])<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_tiny_mul_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>num_channels=6<line_sep># Define a model input_tensor=Input(shape=(input_dim 
))<line_sep>x1=Dense(num_channels)(input_tensor)<line_sep>x2=Dense(num_channels)(x1)<line_sep>x3=Dense(num_channels)(x1)<line_sep>x4=multiply([x2 x3])<line_sep>x5=Dense(num_channels)(x4)<line_sep>model=Model(inputs=[input_tensor] outputs=[x5])<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_tiny_cos_random self<block_start>np.random.seed(1988)<line_sep>input_dim=10<line_sep>num_channels=6<line_sep># Define a model input_tensor=Input(shape=(input_dim ))<line_sep>x1=Dense(num_channels)(input_tensor)<line_sep>x2=Dense(num_channels)(x1)<line_sep>x3=Dense(num_channels)(x1)<line_sep>x4=dot([x2 x3] axes=-1 normalize=<true>)<line_sep>x5=Dense(num_channels)(x4)<line_sep>model=Model(inputs=[input_tensor] outputs=[x5])<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_zeropad_simple self<block_start>input_shape=(48 48 3)<line_sep>model=Sequential()<line_sep>model.add(ZeroPadding2D((1 1) input_shape=input_shape))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_zeropad_fancy self<block_start>input_shape=(48 48 3)<line_sep>model=Sequential()<line_sep>model.add(ZeroPadding2D(((2 5) (3 4)) input_shape=input_shape))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_crop_simple self<block_start>input_shape=(48 48 3)<line_sep>model=Sequential()<line_sep>model.add(Cropping2D(cropping=((2 5) (2 5)) input_shape=input_shape))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_tiny_permute self# When input blob is 3D array (D1, D2, D3), Keras assumes the axes' meaning is # (D1=H,D2=W,D3=C), while CoreML assumes (D1=C,D2=H,D3=W) <block_start><import_stmt>itertools<for_stmt>permute_order list(itertools.permutations([1 2 3]))<block_start>model=Sequential()<line_sep>model.add(Permute(permute_order input_shape=(4 3 2)))<line_sep>self._test_model(model transpose_keras_result=<true>)<block_end><block_end><def_stmt>test_reshape_3d self<block_start>model=Sequential()<line_sep>model.add(Reshape((10 1 6) input_shape=(5 4 3)))<line_sep>self._test_model(model mode="linear")<block_end><def_stmt>test_tiny_conv_dense_random self<block_start>np.random.seed(1988)<line_sep>num_samples=1<line_sep>input_dim=8<line_sep>input_shape=(input_dim input_dim 3)<line_sep>num_kernels=2<line_sep>kernel_height=5<line_sep>kernel_width=5<line_sep>hidden_dim=4<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv2D(input_shape=input_shape filters=num_kernels kernel_size=(kernel_height kernel_width) ))<line_sep>model.add(Dropout(0.5))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(hidden_dim))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_tiny_conv_dropout_random self<block_start>np.random.seed(1988)<line_sep>num_samples=1<line_sep>input_dim=8<line_sep>input_shape=(input_dim input_dim 
3)<line_sep>num_kernels=2<line_sep>kernel_height=5<line_sep>kernel_width=5<line_sep>hidden_dim=4<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv2D(input_shape=input_shape filters=num_kernels kernel_size=(kernel_height kernel_width) ))<line_sep>model.add(SpatialDropout2D(0.5))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(hidden_dim))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_tiny_dense_tanh_fused_random self<block_start>np.random.seed(1988)<line_sep>num_samples=1<line_sep>input_dim=3<line_sep>hidden_dim=4<line_sep># Define a model model=Sequential()<line_sep>model.add(Dense(hidden_dim input_shape=(input_dim ) activation="tanh"))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_tiny_conv_relu_fused_random self<block_start>np.random.seed(1988)<line_sep>num_samples=1<line_sep>input_dim=8<line_sep>input_shape=(input_dim input_dim 3)<line_sep>num_kernels=2<line_sep>kernel_height=5<line_sep>kernel_width=5<line_sep>hidden_dim=4<line_sep># Define a model model=Sequential()<line_sep>model.add(Conv2D(input_shape=input_shape activation="relu" filters=num_kernels kernel_size=(kernel_height kernel_width) ))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_tiny_time_distributed self# as the first layer in a model <block_start>model=Sequential()<line_sep>model.add(TimeDistributed(Dense(8) input_shape=(10 16)))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model)<block_end><def_stmt>test_tiny_sequence_lstm self model_precision=_MLMODEL_FULL_PRECISION<block_start>np.random.seed(1988)<line_sep>input_dim=1<line_sep>input_length=2<line_sep>num_channels=1<line_sep># Define a model model=Sequential()<line_sep>model.add(LSTM(num_channels input_shape=(input_length input_dim) implementation=1 recurrent_activation="sigmoid" ))<line_sep># Set some random weights model.set_weights([(np.random.rand(*w.shape)-0.5)<times>0.2<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model delta=1e-4 model_precision=model_precision)<block_end><def_stmt>test_tiny_sequence_lstm_half_precision self<block_start><return>self.test_tiny_sequence_lstm(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_spatial_bn self<block_start>np.random.seed(1988)<line_sep>x_in=Input(shape=(7 7 2))<line_sep>x=ZeroPadding2D(padding=(1 1))(x_in)<line_sep>x=BatchNormalization(axis=2)(x)<line_sep>model=Model(x_in x)<line_sep>self._test_model(model delta=1e-2)<block_end><def_stmt>test_embedding_fixed_length self<block_start>sequence_length=5<line_sep>vocab_size=10<line_sep>embed_channels=4<line_sep>dense_units=sequence_length<times>embed_channels<line_sep>model=Sequential()<line_sep>model.add(Embedding(vocab_size embed_channels input_length=sequence_length))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(dense_units))<line_sep>model.add(Dense(20))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model one_dim_seq_flags=[<true>])<block_end><def_stmt>test_conv1d_flatten self
delta=1e-2<block_start>model=Sequential()<line_sep>model.add(AveragePooling1D(2 input_shape=(64 9)))<line_sep>model.add(Conv1D(16 1 padding="same" activation="relu" use_bias=<false>))<line_sep>model.add(MaxPooling1D(2))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(units=7 activation="softmax" use_bias=<false>))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model delta=delta)<block_end><def_stmt>test_dense_fused_act_in_td self<block_start>np.random.seed(1988)<line_sep>x_in=Input(shape=(10 2))<line_sep>x=TimeDistributed(Dense(6 activation="softmax"))(x_in)<line_sep>model=Model(inputs=[x_in] outputs=[x])<line_sep>self._test_model(model delta=1e-4)<block_end><def_stmt>test_conv_batch_1d self<block_start>np.random.seed(1988)<line_sep>vocabulary_size=4<line_sep>embedding_dimension=6<line_sep>input_length=10<line_sep>model=Sequential()<line_sep>model.add(Embedding(vocabulary_size embedding_dimension input_length=input_length trainable=<true> ))<line_sep>model.add(Conv1D(5 2))<line_sep>model.add(BatchNormalization())<line_sep>model.add(Activation("relu"))<line_sep>model.add(MaxPooling1D(2))<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model one_dim_seq_flags=[<true>])<block_end><def_stmt>test_lstm_td self<block_start>np.random.seed(1988)<line_sep>input_dim=2<line_sep>input_length=4<line_sep>num_channels=3<line_sep># Define a model model=Sequential()<line_sep>model.add(SimpleRNN(num_channels return_sequences=<true> input_shape=(input_length input_dim) ))<line_sep>model.add(TimeDistributed(Dense(5)))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<times>0.2-0.1<for>w model.get_weights()])<line_sep># Test the keras model self._test_model(model)<block_end># Making sure that giant channel sizes get handled correctly <def_stmt>test_large_channel_gpu self<block_start>input_shape=(20 20 3)<line_sep>num_channels=2049<line_sep>kernel_size=3<line_sep>model=Sequential()<line_sep>model.add(Conv2D(input_shape=input_shape filters=num_channels kernel_size=(kernel_size kernel_size) ))<line_sep>model.set_weights([(np.random.rand(*w.shape)-0.5)<times>0.2<for>w model.get_weights()])<line_sep>self._test_model(model delta=1e-2)<block_end>@pytest.mark.xfail(raises=Exception)<def_stmt>test_large_batch_gpu self<block_start>batch_size=2049<line_sep>num_channels=4<line_sep>kernel_size=3<line_sep>model=Sequential()<line_sep>model.add(TimeDistributed(Dense(num_channels) input_shape=(batch_size kernel_size)))<line_sep>model.set_weights([(np.random.rand(*w.shape)-0.5)<times>0.2<for>w model.get_weights()])<line_sep>self._test_model(model delta=1e-2)<block_end><block_end>@unittest.skipIf(<not>_HAS_KERAS2_TF "Missing keras. 
Skipping tests.")@pytest.mark.keras2<class_stmt>KerasTopologyCorrectnessTest(KerasNumericCorrectnessTest)<block_start><def_stmt>test_dangling_merge_left self<block_start>x1=Input(shape=(4 ) name="input1")<line_sep>x2=Input(shape=(5 ) name="input2")<line_sep>y1=Dense(6 name="dense")(x2)<line_sep>z=concatenate([x1 y1])<line_sep>model=Model(inputs=[x1 x2] outputs=[z])<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model)<block_end><def_stmt>test_dangling_merge_right self<block_start>x1=Input(shape=(4 ) name="input1")<line_sep>x2=Input(shape=(5 ) name="input2")<line_sep>y1=Dense(6 name="dense")(x2)<line_sep>z=concatenate([y1 x1])<line_sep>model=Model(inputs=[x1 x2] outputs=[z])<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model)<block_end><def_stmt>test_shared_vision self<block_start>digit_input=Input(shape=(27 27 1))<line_sep>x=Conv2D(64 (3 3))(digit_input)<line_sep>x=Conv2D(64 (3 3))(x)<line_sep>out=Flatten()(x)<line_sep>vision_model=Model(inputs=[digit_input] outputs=[out])<line_sep># then define the tell-digits-apart model digit_a=Input(shape=(27 27 1))<line_sep>digit_b=Input(shape=(27 27 1))<line_sep># the vision model will be shared, weights and all out_a=vision_model(digit_a)<line_sep>out_b=vision_model(digit_b)<line_sep>concatenated=concatenate([out_a out_b])<line_sep>out=Dense(1 activation="sigmoid")(concatenated)<line_sep>model=Model(inputs=[digit_a digit_b] outputs=out)<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model)<block_end><def_stmt>test_tiny_weight_sharing self# - Dense1 ----------- # x - | |- Merge # - Dense1 - Dense2 -- <block_start>x=Input(shape=(3 ))<line_sep>dense=Dense(4)<line_sep>y1=dense(x)<line_sep>y2=dense(x)<line_sep>y3=Dense(4)(y2)<line_sep>z=concatenate([y1 y3])<line_sep>model=Model(inputs=[x] outputs=[z])<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model mode="random" delta=1e-2)<block_end><def_stmt>test_tiny_multiple_outputs self<block_start>x=Input(shape=(3 ))<line_sep>y1=Dense(4)(x)<line_sep>y2=Dense(5)(x)<line_sep>model=Model([x] [y1 y2])<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model mode="random" delta=1e-2)<block_end><def_stmt>test_intermediate_outputs_dense self<block_start>x=Input(shape=(3 ))<line_sep>y=Dense(4 name="intermediate_dense_y")(x)<line_sep>z=Dense(5 name="intermediate_dense_z")(y)<line_sep>model=Model([x] [y z])<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model mode="random" delta=1e-2)<block_end><def_stmt>test_intermediate_outputs_conv2d self<block_start>x=Input(shape=(8 8 3))<line_sep>y=Conv2D(4 (3 3) name="intermdiate_conv2d_1")(x)<line_sep>z=Conv2D(5 (3 3) name="intermdiate_conv2d_2")(y)<line_sep>model=Model([x] [y z])<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model mode="random" delta=1e-2)<block_end><def_stmt>test_intermediate_outputs_conv2d_fused_act self<block_start>x=Input(shape=(8 8 3))<line_sep>y=Conv2D(4 (3 3) name="intermdiate_conv2d_1_fused" activation="relu")(x)<line_sep>z=Conv2D(5 (3 3) name="intermdiate_conv2d_2_fused" activation="relu")(y)<line_sep>model=Model([x] [y z])<line_sep>model.set_weights([np.random.rand(*w.shape)-0.5<for>w model.get_weights()])<line_sep>self._test_model(model 
mode="random" delta=1e-2)<block_end><def_stmt>test_intermediate_outputs_conv1d self<block_start>x=Input(shape=(10 3))<line_sep>y=Conv1D(4 3 name="intermdiate_conv1d_1")(x)<line_sep>z=Conv1D(5 3 name="intermdiate_conv1d_2")(y)<line_sep>model=Model([x] [y z])<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model mode="random" delta=1e-2)<block_end><def_stmt>test_intermediate_outputs_conv1d_fused_act self<block_start>x=Input(shape=(10 3))<line_sep>y=Conv1D(4 3 name="intermdiate_conv1d_1_fused" activation="relu")(x)<line_sep>z=Conv1D(5 3 name="intermdiate_conv1d_2_fused" activation="relu")(y)<line_sep>model=Model([x] [y z])<line_sep>model.set_weights([np.random.rand(*w.shape)-0.5<for>w model.get_weights()])<line_sep>self._test_model(model mode="random" delta=1e-2)<block_end><def_stmt>test_intermediate_rcnn_1d self<block_start>x_in=Input(shape=(10 2))<line_sep># Conv block 1 x=Conv1D(3 3 padding="same" name="interm_rcnn_conv1")(x_in)<line_sep>x=BatchNormalization(axis=-1 name="interm_rcnn_bn1")(x)<line_sep>x=Activation("elu")(x)<line_sep>x=MaxPooling1D(pool_size=2 name="interm_rcnn_pool1")(x)<line_sep>out1=x# out1.shape = (5,3) x=GRU(6 name="gru1")(x)<line_sep>out2=x<line_sep>model=Model(x_in [out1 out2])<line_sep># model = Model(x_in, [out2]) self._test_model(model mode="random_zero_mean" delta=1e-2)<block_end><def_stmt>test_tiny_mobilenet_arch self model_precision=_MLMODEL_FULL_PRECISION<block_start><def_stmt>ReLU6 x name<block_start><if_stmt>keras.__version__<ge>_StrictVersion("2.2.1")<block_start><return>ReLU(6.0 name=name)(x)<block_end><else_stmt><block_start><return>Activation(relu6 name=name)(x)<block_end><block_end>img_input=Input(shape=(32 32 3))<line_sep>x=Conv2D(4 (3 3) padding="same" use_bias=<false> strides=(2 2) name="conv1")(img_input)<line_sep>x=BatchNormalization(axis=-1 name="conv1_bn")(x)<line_sep>x=ReLU6(x name="conv1_relu")<line_sep>x=DepthwiseConv2D((3 3) padding="same" depth_multiplier=1 strides=(1 1) use_bias=<false> name="conv_dw_1" )(x)<line_sep>x=BatchNormalization(axis=-1 name="conv_dw_1_bn")(x)<line_sep>x=ReLU6(x name="conv_dw_1_relu")<line_sep>x=Conv2D(8 (1 1) padding="same" use_bias=<false> strides=(1 1) name="conv_pw_1")(x)<line_sep>x=BatchNormalization(axis=-1 name="conv_pw_1_bn")(x)<line_sep>x=ReLU6(x name="conv_pw_1_relu")<line_sep>x=DepthwiseConv2D((3 3) padding="same" depth_multiplier=1 strides=(2 2) use_bias=<false> name="conv_dw_2" )(x)<line_sep>x=BatchNormalization(axis=-1 name="conv_dw_2_bn")(x)<line_sep>x=ReLU6(x name="conv_dw_2_relu")<line_sep>x=Conv2D(8 (1 1) padding="same" use_bias=<false> strides=(2 2) name="conv_pw_2")(x)<line_sep>x=BatchNormalization(axis=-1 name="conv_pw_2_bn")(x)<line_sep>x=ReLU6(x name="conv_pw_2_relu")<line_sep>model=Model(inputs=[img_input] outputs=[x])<line_sep>self._test_model(model delta=1e-2 model_precision=model_precision)<block_end><def_stmt>test_tiny_mobilenet_arch_half_precision self<block_start>self.test_tiny_mobilenet_arch(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_tiny_xception self model_precision=_MLMODEL_FULL_PRECISION<block_start>img_input=Input(shape=(32 32 3))<line_sep>x=Conv2D(2 (3 3) strides=(2 2) use_bias=<false> name="block1_conv1")(img_input)<line_sep>x=BatchNormalization(name="block1_conv1_bn")(x)<line_sep>x=Activation("relu" name="block1_conv1_act")(x)<line_sep>x=Conv2D(4 (3 3) use_bias=<false> name="block1_conv2")(x)<line_sep>x=BatchNormalization(name="block1_conv2_bn")(x)<line_sep>x=Activation("relu" 
name="block1_conv2_act")(x)<line_sep>residual=Conv2D(8 (1 1) strides=(2 2) padding="same" use_bias=<false>)(x)<line_sep>residual=BatchNormalization()(residual)<line_sep>x=SeparableConv2D(8 (3 3) padding="same" use_bias=<false> name="block2_sepconv1")(x)<line_sep>x=BatchNormalization(name="block2_sepconv1_bn")(x)<line_sep>x=Activation("relu" name="block2_sepconv2_act")(x)<line_sep>x=SeparableConv2D(8 (3 3) padding="same" use_bias=<false> name="block2_sepconv2")(x)<line_sep>x=BatchNormalization(name="block2_sepconv2_bn")(x)<line_sep>x=MaxPooling2D((3 3) strides=(2 2) padding="same" name="block2_pool")(x)<line_sep>x=add([x residual])<line_sep>residual=Conv2D(16 (1 1) strides=(2 2) padding="same" use_bias=<false>)(x)<line_sep>residual=BatchNormalization()(residual)<line_sep>model=Model(inputs=[img_input] outputs=[residual])<line_sep>self._test_model(model delta=1e-2 model_precision=model_precision)<block_end><def_stmt>test_tiny_xception_half_precision self<block_start><return>self.test_tiny_xception(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_nested_model_giving_output self<block_start>base_model=Sequential()<line_sep>base_model.add(Conv2D(32 (1 1) input_shape=(4 4 3)))<line_sep>top_model=Sequential()<line_sep>top_model.add(Flatten(input_shape=base_model.output_shape[1:]))<line_sep>top_model.add(Dense(16 activation="relu"))<line_sep>top_model.add(Dense(1 activation="sigmoid"))<line_sep>model=Model(inputs=base_model.input outputs=top_model(base_model.output))<line_sep>self._test_model(model)<block_end># similar to issue 269 <def_stmt>test_time_distributed_conv self<block_start>model=Sequential()<line_sep>model.add(TimeDistributed(Conv2D(64 (3 3) activation="relu") input_shape=(1 30 30 3)))<line_sep>model.add(TimeDistributed(MaxPooling2D((2 2) strides=(1 1))))<line_sep>model.add(TimeDistributed(Conv2D(32 (4 4) activation="relu")))<line_sep>model.add(TimeDistributed(MaxPooling2D((2 2) strides=(2 2))))<line_sep>model.add(TimeDistributed(Conv2D(32 (4 4) activation="relu")))<line_sep>model.add(TimeDistributed(MaxPooling2D((2 2) strides=(2 2))))<line_sep>model.add(TimeDistributed(Flatten()))<line_sep>model.add(Dropout(0.5))<line_sep>model.add(LSTM(32 return_sequences=<false> dropout=0.5))<line_sep>model.add(Dense(10 activation="sigmoid"))<line_sep>self._test_model(model)<block_end><block_end>@[email protected]@unittest.skipIf(<not>_HAS_KERAS2_TF "Missing keras. Skipping tests.")<class_stmt>KerasNumericCorrectnessStressTest(KerasNumericCorrectnessTest)<block_start>""" Unit test class for testing all combinations of a particular layer. 
"""<def_stmt>_run_test self model param model_dir=<none> delta=1e-2 transpose_keras_result=<true> one_dim_seq_flags=<none> model_precision=_MLMODEL_FULL_PRECISION <block_start>""" Run a test on a particular model """<line_sep>use_tmp_folder=<false><if_stmt>model_dir<is><none><block_start>use_tmp_folder=<true><line_sep>model_dir=tempfile.mkdtemp()<block_end>model_path=os.path.join(model_dir "keras.mlmodel")<line_sep># Generate some random data nb_inputs=len(model.inputs)<if_stmt>nb_inputs<g>1<block_start>input_names=[]<line_sep>input_data=[]<line_sep>coreml_input={}<for_stmt>i range(nb_inputs)<block_start>input_shape=[1<if>a<is><none><else>a<for>a model.input_shape[i]]<line_sep>X=_generate_data(input_shape)<line_sep>feature_name="data_%s"%i<line_sep>input_names.append(feature_name)<line_sep>input_data.append(X)<if_stmt>one_dim_seq_flags<is><none><block_start>coreml_input[feature_name]=_keras_transpose(X).astype("f")<block_end><else_stmt><block_start>coreml_input[feature_name]=_keras_transpose(X one_dim_seq_flags[i]).astype("f")<block_end><block_end><block_end><else_stmt><block_start>input_shape=[1<if>a<is><none><else>a<for>a model.input_shape]<line_sep>input_names=["data"]<line_sep>input_data=_generate_data(input_shape)<if_stmt>one_dim_seq_flags<is><none><block_start>coreml_input={"data":_keras_transpose(input_data).astype("f")}<block_end><else_stmt><block_start>coreml_input={"data":_keras_transpose(input_data one_dim_seq_flags[0]).astype("f")}<block_end><block_end># Make predictions <if_stmt>transpose_keras_result<block_start>keras_preds=_keras_transpose(model.predict(input_data)).flatten()<block_end><else_stmt><block_start>keras_preds=model.predict(input_data).flatten()<block_end># Get the model coreml_model=_get_coreml_model(model input_names ["output"] model_precision=model_precision)<if_stmt>_is_macos()<and>_macos_version()<ge>(10 13)# get prediction <block_start>coreml_preds=coreml_model.predict(coreml_input)["output"].flatten()<if_stmt>use_tmp_folder<block_start>shutil.rmtree(model_dir)<block_end>self.assertEqual(len(coreml_preds) len(keras_preds) msg="Failed test case %s. Lengths wrong (%s vs %s)"%(param len(coreml_preds) len(keras_preds)) )<for_stmt>i range(len(keras_preds))<block_start>max_den=max(1.0 keras_preds[i] coreml_preds[i])<line_sep>self.assertAlmostEqual(keras_preds[i]/max_den coreml_preds[i]/max_den delta=delta msg="Failed test case %s. Predictions wrong (%s vs %s)"%(param coreml_preds[i] keras_preds[i]) )<block_end><block_end><block_end>@pytest.mark.slow<def_stmt>test_activation_layer_params self<block_start>options=dict(activation=["tanh" "relu" "sigmoid" "softmax" "softplus" "softsign" "hard_sigmoid" "elu" ])<line_sep># Define a function that tests a model num_channels=10<line_sep>input_dim=10<def_stmt>build_model x<block_start>model=Sequential()<line_sep>model.add(Dense(num_channels input_dim=input_dim))<line_sep>model.add(Activation(**dict(zip(options.keys() x))))<line_sep><return>x model<block_end># Iterate through all combinations product=itertools.product(*options.values())<line_sep>args=[build_model(p)<for>p product]<line_sep># Test the cases print("Testing a total of %s cases. 
This could take a while"%len(args))<for_stmt>param,model args<block_start>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._run_test(model param)<block_end><block_end>@pytest.mark.slow<def_stmt>test_dense_layer_params self<block_start>options=dict(activation=["relu" "softmax" "tanh" "sigmoid" "softplus" "softsign" "elu" "hard_sigmoid" ] use_bias=[<true> <false>] )<line_sep># Define a function that tests a model input_shape=(10 )<line_sep>num_channels=10<def_stmt>build_model x<block_start>kwargs=dict(zip(options.keys() x))<line_sep>model=Sequential()<line_sep>model.add(Dense(num_channels input_shape=input_shape **kwargs))<line_sep><return>x model<block_end># Iterate through all combinations product=itertools.product(*options.values())<line_sep>args=[build_model(p)<for>p product]<line_sep># Test the cases print("Testing a total of %s cases. This could take a while"%len(args))<for_stmt>param,model args<block_start>self._run_test(model param)<block_end><block_end>@pytest.mark.slow<def_stmt>test_upsample_layer_params self<block_start>options=dict(size=[(2 2) (3 3) (4 4) (5 5)])<line_sep>np.random.seed(1988)<line_sep>input_dim=10<line_sep>input_shape=(input_dim input_dim 1)<line_sep>X=np.random.rand(1 *input_shape)<line_sep># Define a function that tests a model <def_stmt>build_model x<block_start>kwargs=dict(zip(options.keys() x))<line_sep>model=Sequential()<line_sep>model.add(Conv2D(filters=5 kernel_size=(7 7) input_shape=input_shape))<line_sep>model.add(UpSampling2D(**kwargs))<line_sep><return>x model<block_end># Iterate through all combinations product=itertools.product(*options.values())<line_sep>args=[build_model(p)<for>p product]<line_sep># Test the cases print("Testing a total of %s cases. This could take a while"%len(args))<for_stmt>param,model args<block_start>self._run_test(model param)<block_end><block_end>@pytest.mark.slow<def_stmt>test_conv_layer_params self model_precision=_MLMODEL_FULL_PRECISION<block_start>options=dict(activation=["relu" "tanh" "sigmoid" ] # keras does not support softmax on 4-D use_bias=[<true> <false>] padding=["same" "valid"] filters=[1 3 5] kernel_size=[[5 5]] # fails when sizes are different )<line_sep># Define a function that tests a model input_shape=(10 10 1)<def_stmt>build_model x<block_start>kwargs=dict(zip(options.keys() x))<line_sep>model=Sequential()<line_sep>model.add(Conv2D(input_shape=input_shape **kwargs))<line_sep><return>x model<block_end># Iterate through all combinations product=itertools.product(*options.values())<line_sep>args=[build_model(p)<for>p product]<line_sep># Test the cases print("Testing a total of %s cases. This could take a while"%len(args))<for_stmt>param,model args<block_start>self._run_test(model param model_precision=model_precision)<block_end><block_end>@pytest.mark.keras2<def_stmt>test_conv_layer_params_half_precision self<block_start><return>self.test_conv_layer_params(model_precision=_MLMODEL_HALF_PRECISION)<block_end>@pytest.mark.slow<def_stmt>test_dense_elementwise_params self<block_start>options=dict(modes=[add multiply concatenate average maximum])<def_stmt>build_model mode<block_start>x1=Input(shape=(3 ))<line_sep>x2=Input(shape=(3 ))<line_sep>y1=Dense(4)(x1)<line_sep>y2=Dense(4)(x2)<line_sep>z=mode([y1 y2])<line_sep>model=Model([x1 x2] z)<line_sep><return>mode model<block_end>product=itertools.product(*options.values())<line_sep>args=[build_model(p[0])<for>p product]<line_sep>print("Testing a total of %s cases. 
This could take a while"%len(args))<for_stmt>param,model args<block_start>self._run_test(model param)<block_end><block_end><def_stmt>test_vgg_16_tiny self<block_start>input_shape=(48 48 3)<line_sep>model=Sequential()<line_sep>model.add(ZeroPadding2D((1 1) input_shape=input_shape))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(MaxPooling2D((2 2) strides=(2 2)))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(MaxPooling2D((2 2) strides=(2 2)))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(MaxPooling2D((2 2) strides=(2 2)))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(MaxPooling2D((2 2) strides=(2 2)))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(MaxPooling2D((2 2) strides=(2 2)))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(32 activation="relu"))<line_sep>model.add(Dropout(0.5))<line_sep>model.add(Dense(32 activation="relu"))<line_sep>model.add(Dropout(0.5))<line_sep>model.add(Dense(1000))# activation='softmax')) # Set some random weights model.set_weights([(np.random.rand(*w.shape)-0.5)<times>0.2<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_vgg_16_tiny_no_pooling self<block_start>input_shape=(48 48 3)<line_sep>model=Sequential()<line_sep>model.add(ZeroPadding2D((1 1) input_shape=input_shape))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(MaxPooling2D((2 2) strides=(2 2)))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(MaxPooling2D((2 2) strides=(2 2)))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(MaxPooling2D((2 2) strides=(2 2)))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(MaxPooling2D((2 2) strides=(2 2)))<line_sep>model.add(ZeroPadding2D((1 
1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(ZeroPadding2D((1 1)))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(MaxPooling2D((2 2) strides=(2 2)))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(32 activation="relu"))<line_sep># model.add(Dropout(0.5)) model.add(Dense(32 activation="relu"))<line_sep># model.add(Dropout(0.5)) model.add(Dense(1000))# activation='softmax')) # Set some random weights model.set_weights([(np.random.rand(*w.shape)-0.5)<times>0.2<for>w model.get_weights()])<line_sep># Get the coreml model self._test_model(model)<block_end><def_stmt>test_vgg_16_tiny_no_pooling_no_padding self model_precision=_MLMODEL_FULL_PRECISION<block_start>input_shape=(48 48 3)<line_sep>model=Sequential()<line_sep>model.add(Conv2D(32 (3 3) activation="relu" input_shape=input_shape))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Conv2D(32 (3 3) activation="relu"))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(32 activation="relu"))<line_sep>model.add(Dropout(0.5))<line_sep>model.add(Dense(32 activation="relu"))<line_sep>model.add(Dropout(0.5))<line_sep>model.add(Dense(1000 activation="softmax"))<line_sep># Get the coreml model self._test_model(model model_precision=model_precision)<block_end><def_stmt>test_vgg_16_tiny_no_pooling_no_padding_half_precision self<block_start><return>self.test_vgg_16_tiny_no_pooling_no_padding(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_imdb_fasttext_first_2 self<block_start>max_features=10<line_sep>max_len=6<line_sep>embedding_dims=4<line_sep>pool_length=2<line_sep>model=Sequential()<line_sep>model.add(Embedding(max_features embedding_dims input_length=max_len))<line_sep># we add a AveragePooling1D, which will average the embeddings # of all words in the document model.add(AveragePooling1D(pool_size=pool_length))<line_sep>self._test_model(model one_dim_seq_flags=[<true>])<block_end><def_stmt>test_tiny_mcrnn_td self<block_start>model=Sequential()<line_sep>model.add(Conv2D(3 (1 1) input_shape=(2 4 4) padding="same"))<line_sep>model.add(AveragePooling2D(pool_size=(2 2)))<line_sep>model.add(Reshape((2 3)))<line_sep>model.add(TimeDistributed(Dense(5)))<line_sep>self._test_model(model)<block_end><def_stmt>test_tiny_mcrnn_recurrent self<block_start>model=Sequential()<line_sep>model.add(Conv2D(3 (1 1) input_shape=(2 4 4) padding="same"))<line_sep>model.add(AveragePooling2D(pool_size=(2 2)))<line_sep>model.add(Reshape((2 3)))<line_sep>model.add(LSTM(5 recurrent_activation="sigmoid"))<line_sep>self._test_model(model)<block_end><def_stmt>test_tiny_mcrnn_music_tagger self<block_start>x_in=Input(shape=(4 6 1))<line_sep>x=ZeroPadding2D(padding=(0 1))(x_in)<line_sep>x=BatchNormalization(axis=2 name="bn_0_freq")(x)<line_sep># Conv block 1 x=Conv2D(2 (3 3) padding="same" 
name="conv1")(x)<line_sep>x=BatchNormalization(axis=3 name="bn1")(x)<line_sep>x=Activation("elu")(x)<line_sep>x=MaxPooling2D(pool_size=(2 2) strides=(2 2) name="pool1")(x)<line_sep># Conv block 2 x=Conv2D(4 (3 3) padding="same" name="conv2")(x)<line_sep>x=BatchNormalization(axis=3 name="bn2")(x)<line_sep>x=Activation("elu")(x)<line_sep>x=MaxPooling2D(pool_size=(2 2) strides=(2 2) name="pool2")(x)<line_sep># Should get you (1,1,2,4) x=Reshape((2 4))(x)<line_sep>x=GRU(32 return_sequences=<true> name="gru1")(x)<line_sep>x=GRU(32 return_sequences=<false> name="gru2")(x)<line_sep># Create model. model=Model(x_in x)<line_sep>model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep>self._test_model(model mode="random_zero_mean" delta=1e-2)<block_end><def_stmt>test_tiny_apple_manual self<block_start>model=Sequential()<line_sep>model.add(LSTM(3 input_shape=(4 5) recurrent_activation="sigmoid"))<line_sep>model.add(Dense(5))<line_sep>model.add(Activation("softmax"))<line_sep>self._test_model(model)<block_end><def_stmt>test_tiny_image_captioning_image_branch self<block_start>img_input_1=Input(shape=(16 16 3))<line_sep>x=Conv2D(2 (3 3))(img_input_1)<line_sep>x=Flatten()(x)<line_sep>img_model=Model(inputs=[img_input_1] outputs=[x])<line_sep>img_input=Input(shape=(16 16 3))<line_sep>x=img_model(img_input)<line_sep>x=Dense(8 name="cap_dense")(x)<line_sep>x=Reshape((1 8) name="cap_reshape")(x)<line_sep>image_branch=Model(inputs=[img_input] outputs=[x])<line_sep>self._test_model(image_branch)<block_end><def_stmt>test_tiny_image_captioning_feature_merge self<block_start>img_input_1=Input(shape=(16 16 3))<line_sep>x=Conv2D(2 (3 3))(img_input_1)<line_sep>x=Flatten()(x)<line_sep>img_model=Model([img_input_1] [x])<line_sep>img_input=Input(shape=(16 16 3))<line_sep>x=img_model(img_input)<line_sep>x=Dense(8 name="cap_dense")(x)<line_sep>x=Reshape((1 8) name="cap_reshape")(x)<line_sep>sentence_input=Input(shape=(5 ))# max_length = 5 y=Embedding(8 8 name="cap_embedding")(sentence_input)<line_sep>z=concatenate([x y] axis=1 name="cap_merge")<line_sep>combined_model=Model(inputs=[img_input sentence_input] outputs=[z])<line_sep>self._test_model(combined_model one_dim_seq_flags=[<false> <true>])<block_end><def_stmt>test_tiny_image_captioning self# use a conv layer as a image feature branch <block_start>img_input_1=Input(shape=(16 16 3))<line_sep>x=Conv2D(2 (3 3))(img_input_1)<line_sep>x=Flatten()(x)<line_sep>img_model=Model(inputs=[img_input_1] outputs=[x])<line_sep>img_input=Input(shape=(16 16 3))<line_sep>x=img_model(img_input)<line_sep>x=Dense(8 name="cap_dense")(x)<line_sep>x=Reshape((1 8) name="cap_reshape")(x)<line_sep>sentence_input=Input(shape=(5 ))# max_length = 5 y=Embedding(8 8 name="cap_embedding")(sentence_input)<line_sep>z=concatenate([x y] axis=1 name="cap_merge")<line_sep>z=LSTM(4 return_sequences=<true> name="cap_lstm")(z)<line_sep>z=TimeDistributed(Dense(8) name="cap_timedistributed")(z)<line_sep>combined_model=Model(inputs=[img_input sentence_input] outputs=[z])<line_sep>self._test_model(combined_model one_dim_seq_flags=[<false> <true>])<block_end><def_stmt>test_tiny_babi_rnn self<block_start>vocab_size=10<line_sep>embed_hidden_size=8<line_sep>story_maxlen=5<line_sep>query_maxlen=5<line_sep>input_tensor_1=Input(shape=(story_maxlen ))<line_sep>x1=Embedding(vocab_size embed_hidden_size)(input_tensor_1)<line_sep>x1=Dropout(0.3)(x1)<line_sep>input_tensor_2=Input(shape=(query_maxlen ))<line_sep>x2=Embedding(vocab_size 
embed_hidden_size)(input_tensor_2)<line_sep>x2=Dropout(0.3)(x2)<line_sep>x2=LSTM(embed_hidden_size return_sequences=<false>)(x2)<line_sep>x2=RepeatVector(story_maxlen)(x2)<line_sep>x3=add([x1 x2])<line_sep>x3=LSTM(embed_hidden_size return_sequences=<false>)(x3)<line_sep>x3=Dropout(0.3)(x3)<line_sep>x3=Dense(vocab_size activation="softmax")(x3)<line_sep>model=Model(inputs=[input_tensor_1 input_tensor_2] outputs=[x3])<line_sep>self._test_model(model one_dim_seq_flags=[<true> <true>])<block_end><def_stmt>test_clickbait_cnn self model_precision=_MLMODEL_FULL_PRECISION# from: https://github.com/saurabhmathur96/clickbait-detector <block_start>vocabulary_size=500<line_sep>embedding_dimension=30<line_sep>input_length=20<line_sep>model=Sequential()<line_sep>model.add(Embedding(vocabulary_size embedding_dimension input_length=input_length trainable=<true> ))<line_sep>model.add(Conv1D(32 2))<line_sep>model.add(BatchNormalization())<line_sep>model.add(Activation("relu"))<line_sep>model.add(Conv1D(32 2))<line_sep>model.add(BatchNormalization())<line_sep>model.add(Activation("relu"))<line_sep>model.add(Conv1D(32 2))<line_sep>model.add(BatchNormalization())<line_sep>model.add(Activation("relu"))<line_sep>model.add(MaxPooling1D(17))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(1 use_bias=<true>))<line_sep>model.add(BatchNormalization())<line_sep>model.add(Activation("sigmoid"))<line_sep>self._test_model(model one_dim_seq_flags=[<true>] model_precision=model_precision)<block_end><def_stmt>test_clickbait_cnn_half_precision self<block_start><return>self.test_clickbait_cnn(model_precision=_MLMODEL_HALF_PRECISION)<block_end><def_stmt>test_model_with_duplicated_edges self# Create a simple model <block_start>inputs=Input(shape=(20 20))<line_sep>activation=Activation("relu")(inputs)<line_sep>cropping=Cropping1D(cropping=(1 1))(activation)<line_sep>conv1d=Conv1D(20 3 padding="valid")(activation)<line_sep>outputs=Add()([conv1d cropping])<line_sep>model=Model(inputs outputs)<line_sep>self._test_model(model)<block_end><block_end>@unittest.skipIf(<not>_HAS_KERAS2_TF "Missing keras. Skipping tests.")@pytest.mark.keras2<class_stmt>KerasBasicConversionTest(KerasNumericCorrectnessTest)<block_start><def_stmt>test_float_arraytype_flag self<block_start>np.random.seed(1988)<line_sep># Define a model model=Sequential()<line_sep>model.add(Dense(1000 input_shape=(100 )))<line_sep># Set some random weights model.set_weights([np.random.rand(*w.shape)<for>w model.get_weights()])<line_sep># Convert model <import_from_stmt>coremltools.converters keras<as>keras_converter<line_sep>coreml_model=keras_converter.convert(model use_float_arraytype=<true>)<line_sep>spec=coreml_model.get_spec()<import_from_stmt>coremltools.proto Model_pb2<as>_Model_pb2<line_sep>self.assertEqual(spec.description.input[0].type.multiArrayType.dataType _Model_pb2.ArrayFeatureType.FLOAT32 )<line_sep>self.assertEqual(spec.description.output[0].type.multiArrayType.dataType _Model_pb2.ArrayFeatureType.FLOAT32 )<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<line_sep># suite = unittest.TestSuite() # suite.addTest(KerasBasicNumericCorrectnessTest("test_lstm_concat_dense_random")) # unittest.TextTestRunner().run(suite) <block_end>
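# A minimal sketch of the conversion round trip these tests exercise, assuming coremltools' Keras converter is available; the tiny model and the 'data'/'output' feature names below are illustrative, not taken from the suite above. <import_stmt>numpy<as>np<line_sep><import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers Dense<import_from_stmt>coremltools.converters keras<as>keras_converter<line_sep>demo=Sequential()<line_sep>demo.add(Dense(4 input_shape=(3 )))<line_sep># Convert the Keras model to Core ML and run the Keras side for comparison mlmodel=keras_converter.convert(demo input_names=['data'] output_names=['output'])<line_sep>x=np.random.rand(3)<line_sep>keras_out=demo.predict(x.reshape(1 3))<line_sep># On macOS >= 10.13 the converted model can be queried the same way the tests do: # coreml_out=mlmodel.predict({'data':x})['output']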
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Installing test models against a session."""<import_from_stmt>builtins object<import_from_stmt>collections defaultdict<import_from_stmt>google.cloud.forseti.services utils<class_stmt>ModelCreatorClient(object)<block_start>"""Model creator client."""<def_stmt>__init__ self session data_access<block_start>self.session=session<line_sep>self.data_access=data_access<line_sep>self.explain=self<block_end><def_stmt>add_resource self resource_type_name parent_type_name no_parent<block_start><return>self.data_access.add_resource_by_name(self.session resource_type_name parent_type_name no_parent)<block_end><def_stmt>add_member self child parents<block_start><return>self.data_access.add_member(self.session child parents)<block_end><def_stmt>add_role self role_name permissions<block_start><return>self.data_access.add_role_by_name(self.session role_name permissions)<block_end><def_stmt>get_iam_policy self full_resource_name<block_start>policy_dict=self.data_access.get_iam_policy(self.session utils.full_to_type_name(full_resource_name))<class_stmt>PolicyAccessor(dict)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super(PolicyAccessor self).__init__(*args **kwargs)<line_sep>self.policy=self<line_sep>self.bindings=self['bindings']<if>'bindings'<in>self<else>[]<line_sep>self.etag=self['etag']<if>'etag'<in>self<else><none><block_end><block_end><return>PolicyAccessor(policy_dict)<block_end><def_stmt>set_iam_policy self full_resource_name policy<block_start><return>self.data_access.set_iam_policy(self.session utils.full_to_type_name(full_resource_name) policy update_members=<true>)<block_end><def_stmt>expand_special_members self<block_start>self.data_access.expand_special_members(self.session)<block_end><def_stmt>commit self<block_start>self.session.commit()<line_sep>self.data_access.denorm_group_in_group(self.session)<line_sep>self.session.commit()<block_end><block_end><class_stmt>ModelCreator(object)<block_start>"""Model creator."""<def_stmt>__init__ self model client<block_start>self._install_model(model client)<line_sep>client.commit()<block_end><def_stmt>_install_model self model client<block_start>self._install_resources(model['resources'] client)<line_sep>self._install_memberships(model['memberships'] client)<line_sep>self._install_roles(model['roles'] client)<line_sep>self._install_bindings(model['bindings'] client)<block_end><def_stmt>_recursive_install_resources self node model client parent<block_start>"""Install resources."""<line_sep>client.add_resource(node parent bool(<not>parent))<for_stmt>root,tree model.items()<block_start>self._recursive_install_resources(root tree client node)<block_end><block_end><def_stmt>_install_resources self model_view client<block_start>"""Install resources."""<for_stmt>root,tree model_view.items()<block_start>self._recursive_install_resources(root tree client '')<block_end><block_end><def_stmt>_recursive_invert_membership self node model 
parentship<block_start><if_stmt>node<not><in>parentship<block_start>parentship[node]=set()<block_end><for_stmt>child model.keys()<block_start>parentship[child].add(node)<block_end><for_stmt>root,tree model.items()<block_start>self._recursive_invert_membership(root tree parentship)<block_end><return>parentship<block_end><def_stmt>_cyclic self g<block_start>path=set()<line_sep>visited=set()<def_stmt>visit vertex<block_start><if_stmt>vertex<in>visited<block_start><return><false><block_end>visited.add(vertex)<line_sep>path.add(vertex)<for_stmt>neighbour g.get(vertex ())<block_start><if_stmt>neighbour<in>path<or>visit(neighbour)<block_start><return><true><block_end><block_end>path.remove(vertex)<line_sep><return><false><block_end><return>any(visit(v)<for>v g)<block_end><def_stmt>_install_memberships self model_view client<block_start>parent_relationship=defaultdict(set)<for_stmt>root,tree model_view.items()<block_start>self._recursive_invert_membership(root tree parent_relationship)<block_end><if_stmt>self._cyclic(parent_relationship)<block_start><raise>Exception('Cyclic membership relation not supported!')<block_end>installed_members=set()<while_stmt>len(parent_relationship)<g>0<block_start><for_stmt>child,parents parent_relationship.items()<block_start><if_stmt>parents.issubset(installed_members)<block_start><break><block_end><block_end>installed_members.add(child)<line_sep>client.add_member(child list(parents))<line_sep>parent_relationship.pop(child)<block_end><block_end><def_stmt>_install_roles self model_view client<block_start><for_stmt>role,permissions model_view.items()<block_start>client.add_role(role permissions)<block_end><block_end><def_stmt>_install_bindings self model_view client<block_start><for_stmt>resource_name,bindings model_view.items()<block_start>reply=client.get_iam_policy(resource_name)<if_stmt>len(reply.policy.bindings)<g>0<block_start><raise>Exception('policy should have been empty')<block_end>client.set_iam_policy(resource_name {'bindings':bindings 'etag':reply.policy.etag})<block_end>client.expand_special_members()<block_end><block_end>
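# A hedged sketch of the nested description ModelCreator consumes, inferred from the four installers above; every concrete name here is illustrative. Resources and memberships are trees (dicts of dicts), roles map to permission lists, and bindings map a full resource name to role/member assignments. TEST_MODEL={'resources':{'organization/org1':{'project/project1':{'bucket/bucket1':{}}}} 'memberships':{'group/eng':{'user/alice':{} 'user/bob':{}}} 'roles':{'role/bucket_reader':['storage.buckets.get' 'storage.buckets.list']} 'bindings':{'organization/org1/project/project1/bucket/bucket1':{'role/bucket_reader':['group/eng']}}}<line_sep># Given a live session and data_access, this would install and commit the model: # ModelCreator(TEST_MODEL ModelCreatorClient(session data_access))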
<import_stmt>numpy<as>np<line_sep># Keras <import_from_stmt>keras.applications.inception_v3 InceptionV3<import_from_stmt>keras.applications.vgg16 VGG16<import_from_stmt>keras.models Model<import_from_stmt>keras.layers Input Dense Dropout LSTM Embedding concatenate RepeatVector TimeDistributed Bidirectional<import_from_stmt>keras.preprocessing.sequence pad_sequences<import_from_stmt>tqdm tqdm<line_sep># To measure BLEU Score <import_from_stmt>nltk.translate.bleu_score corpus_bleu<line_sep>""" *Define the CNN model """<def_stmt>CNNModel model_type<block_start><if_stmt>model_type<eq>'inceptionv3'<block_start>model=InceptionV3()<block_end><elif_stmt>model_type<eq>'vgg16'<block_start>model=VGG16()<block_end>model.layers.pop()<line_sep>model=Model(inputs=model.inputs outputs=model.layers[-1].output)<line_sep><return>model<block_end>""" *Define the RNN model """<def_stmt>RNNModel vocab_size max_len rnnConfig model_type<block_start>embedding_size=rnnConfig['embedding_size']<if_stmt>model_type<eq>'inceptionv3'# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model <block_start>image_input=Input(shape=(2048 ))<block_end><elif_stmt>model_type<eq>'vgg16'# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model <block_start>image_input=Input(shape=(4096 ))<block_end>image_model_1=Dropout(rnnConfig['dropout'])(image_input)<line_sep>image_model=Dense(embedding_size activation='relu')(image_model_1)<line_sep>caption_input=Input(shape=(max_len ))<line_sep># mask_zero: We zero pad inputs to the same length, and the zero mask ignores those padded positions, i.e. masking is an efficiency gain. caption_model_1=Embedding(vocab_size embedding_size mask_zero=<true>)(caption_input)<line_sep>caption_model_2=Dropout(rnnConfig['dropout'])(caption_model_1)<line_sep>caption_model=LSTM(rnnConfig['LSTM_units'])(caption_model_2)<line_sep># Merging the models and creating a softmax classifier final_model_1=concatenate([image_model caption_model])<line_sep>final_model_2=Dense(rnnConfig['dense_units'] activation='relu')(final_model_1)<line_sep>final_model=Dense(vocab_size activation='softmax')(final_model_2)<line_sep>model=Model(inputs=[image_input caption_input] outputs=final_model)<line_sep>model.compile(loss='categorical_crossentropy' optimizer='adam')<line_sep><return>model<block_end>""" *Define the RNN model with different architecture """<def_stmt>AlternativeRNNModel vocab_size max_len rnnConfig model_type<block_start>embedding_size=rnnConfig['embedding_size']<if_stmt>model_type<eq>'inceptionv3'# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model <block_start>image_input=Input(shape=(2048 ))<block_end><elif_stmt>model_type<eq>'vgg16'# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model <block_start>image_input=Input(shape=(4096 ))<block_end>image_model_1=Dense(embedding_size activation='relu')(image_input)<line_sep>image_model=RepeatVector(max_len)(image_model_1)<line_sep>caption_input=Input(shape=(max_len ))<line_sep># mask_zero: We zero pad inputs to the same length, and the zero mask ignores those padded positions, i.e. masking is an efficiency gain. caption_model_1=Embedding(vocab_size embedding_size mask_zero=<true>)(caption_input)<line_sep># Since we are going to predict the next word using the previous words # (length of previous words changes with every iteration over the caption), we have to set return_sequences = True.
caption_model_2=LSTM(rnnConfig['LSTM_units'] return_sequences=<true>)(caption_model_1)<line_sep># caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2) caption_model=TimeDistributed(Dense(embedding_size))(caption_model_2)<line_sep># Merging the models and creating a softmax classifier final_model_1=concatenate([image_model caption_model])<line_sep># final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1) final_model_2=Bidirectional(LSTM(rnnConfig['LSTM_units'] return_sequences=<false>))(final_model_1)<line_sep># final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2) # final_model = Dense(vocab_size, activation='softmax')(final_model_3) final_model=Dense(vocab_size activation='softmax')(final_model_2)<line_sep>model=Model(inputs=[image_input caption_input] outputs=final_model)<line_sep>model.compile(loss='categorical_crossentropy' optimizer='adam')<line_sep># model.compile(loss='categorical_crossentropy', optimizer='rmsprop') <return>model<block_end>""" *Map an integer to a word """<def_stmt>int_to_word integer tokenizer<block_start><for_stmt>word,index tokenizer.word_index.items()<block_start><if_stmt>index<eq>integer<block_start><return>word<block_end><block_end><return><none><block_end>""" *Generate a caption for an image, given a pre-trained model and a tokenizer to map an integer back to a word *Uses simple argmax """<def_stmt>generate_caption model tokenizer image max_length# Seed the generation process <block_start>in_text='startseq'<line_sep># Iterate over the whole length of the sequence <for_stmt>_ range(max_length)# Integer encode input sequence <block_start>sequence=tokenizer.texts_to_sequences([in_text])[0]<line_sep># Pad input sequence=pad_sequences([sequence] maxlen=max_length)<line_sep># Predict next word # The model will output a prediction, which will be a probability distribution over all words in the vocabulary. yhat=model.predict([image sequence] verbose=0)<line_sep># The output vector represents a probability distribution where the maximum probability marks the predicted word position # Take output class with maximum probability and convert to integer yhat=np.argmax(yhat)<line_sep># Map integer back to word word=int_to_word(yhat tokenizer)<line_sep># Stop if we cannot map the word <if_stmt>word<is><none><block_start><break><block_end># Append as input for generating the next word in_text<augadd>' '+word<line_sep># Stop if we predict the end of the sequence <if_stmt>word<eq>'endseq'<block_start><break><block_end><block_end><return>in_text<block_end>""" *Generate a caption for an image, given a pre-trained model and a tokenizer to map an integer back to a word *Uses BEAM Search algorithm """<def_stmt>generate_caption_beam_search model tokenizer image max_length beam_index=3# in_text --> [[idx,prob]] ;prob=0 initially <block_start>in_text=[[tokenizer.texts_to_sequences(['startseq'])[0] 0.0]]<while_stmt>len(in_text[0][0])<l>max_length<block_start>tempList=[]<for_stmt>seq in_text<block_start>padded_seq=pad_sequences([seq[0]] maxlen=max_length)<line_sep>preds=model.predict([image padded_seq] verbose=0)<line_sep># Take the top (i.e.
which have the highest probabilities) `beam_index` predictions top_preds=np.argsort(preds[0])[-beam_index:]<line_sep># Extend each candidate sequence with each of the top `beam_index` predicted words <for_stmt>word top_preds<block_start>next_seq,prob=seq[0][:] seq[1]<line_sep>next_seq.append(word)<line_sep># Update probability prob<augadd>preds[0][word]<line_sep># Keep the extended candidate and its score tempList.append([next_seq prob])<block_end><block_end>in_text=tempList<line_sep># Sorting according to the probabilities in_text=sorted(in_text reverse=<false> key=<lambda>l:l[1])<line_sep># Keep the top `beam_index` candidate sequences in_text=in_text[-beam_index:]<block_end>in_text=in_text[-1][0]<line_sep>final_caption_raw=[int_to_word(i tokenizer)<for>i in_text]<line_sep>final_caption=[]<for_stmt>word final_caption_raw<block_start><if_stmt>word<eq>'endseq'<block_start><break><block_end><else_stmt><block_start>final_caption.append(word)<block_end><block_end>final_caption.append('endseq')<line_sep><return>' '.join(final_caption)<block_end>""" *Evaluate the model on BLEU Score using argmax predictions """<def_stmt>evaluate_model model images captions tokenizer max_length<block_start>actual,predicted=list() list()<for_stmt>image_id,caption_list tqdm(captions.items())<block_start>yhat=generate_caption(model tokenizer images[image_id] max_length)<line_sep>ground_truth=[caption.split()<for>caption caption_list]<line_sep>actual.append(ground_truth)<line_sep>predicted.append(yhat.split())<block_end>print('BLEU Scores :')<line_sep>print('A perfect match results in a score of 1.0, whereas a perfect mismatch results in a score of 0.0.')<line_sep>print('BLEU-1: %f'%corpus_bleu(actual predicted weights=(1.0 0 0 0)))<line_sep>print('BLEU-2: %f'%corpus_bleu(actual predicted weights=(0.5 0.5 0 0)))<line_sep>print('BLEU-3: %f'%corpus_bleu(actual predicted weights=(0.3 0.3 0.3 0)))<line_sep>print('BLEU-4: %f'%corpus_bleu(actual predicted weights=(0.25 0.25 0.25 0.25)))<block_end>""" *Evaluate the model on BLEU Score using BEAM search predictions """<def_stmt>evaluate_model_beam_search model images captions tokenizer max_length beam_index=3<block_start>actual,predicted=list() list()<for_stmt>image_id,caption_list tqdm(captions.items())<block_start>yhat=generate_caption_beam_search(model tokenizer images[image_id] max_length beam_index=beam_index)<line_sep>ground_truth=[caption.split()<for>caption caption_list]<line_sep>actual.append(ground_truth)<line_sep>predicted.append(yhat.split())<block_end>print('BLEU Scores :')<line_sep>print('A perfect match results in a score of 1.0, whereas a perfect mismatch results in a score of 0.0.')<line_sep>print('BLEU-1: %f'%corpus_bleu(actual predicted weights=(1.0 0 0 0)))<line_sep>print('BLEU-2: %f'%corpus_bleu(actual predicted weights=(0.5 0.5 0 0)))<line_sep>print('BLEU-3: %f'%corpus_bleu(actual predicted weights=(0.3 0.3 0.3 0)))<line_sep>print('BLEU-4: %f'%corpus_bleu(actual predicted weights=(0.25 0.25 0.25 0.25)))<block_end>
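# A hedged end-to-end sketch of how the pieces above are usually wired together; `tokenizer` (a fitted keras Tokenizer), the image feature, and all config values are placeholders, not fixed by the code above. rnn_config={'embedding_size':300 'dropout':0.5 'LSTM_units':256 'dense_units':256}<line_sep>caption_model=RNNModel(vocab_size=7579 max_len=34 rnnConfig=rnn_config model_type='vgg16')<line_sep># feature=CNNModel('vgg16').predict(preprocessed_image) # shape (1, 4096) after preprocessing # print(generate_caption_beam_search(caption_model tokenizer feature 34 beam_index=3))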
<import_stmt>random<import_from_stmt>.sqrtmod sqrtmod_prime_power has_sqrtmod_prime_power<import_from_stmt>.modular invmod<line_sep>__all__=('NULL_POINT' 'Curve')<line_sep>NULL_POINT=(<none> <none>)<class_stmt>Curve<block_start><def_stmt>__init__ self a b p g=<none> order=<none> cofactor=<none> seed=<none><block_start>self.a=a<line_sep>self.b=b<line_sep>self.module=p<line_sep>self.g=g<line_sep>self.order=order<line_sep>self.cofactor=cofactor<line_sep>self.seed=seed<line_sep>self.points_count=<none><if_stmt>self.cofactor<eq>1<and>self.order<is><not><none><block_start>self.points_count=self.order<block_end><return><none><block_end><def_stmt>is_null self p<block_start>""" Check if a point is curve's null point """<line_sep><return>p<eq>NULL_POINT<block_end><def_stmt>is_opposite self p1 p2<block_start>""" Check if one point is opposite to another (p1 == -p2) """<line_sep>x1,y1=p1<line_sep>x2,y2=p2<line_sep><return>(x1<eq>x2<and>y1<eq>-y2%self.module)<block_end><def_stmt>check self p<block_start>""" Check if point is on the curve """<line_sep>x,y=p<if_stmt>self.is_null(p)<block_start><return><true><block_end>left=(y<power>2)%self.module<line_sep>right=self.right(x)<line_sep><return>left<eq>right<block_end><def_stmt>check_x self x<block_start>""" Check if there is a point on the curve with given @x coordinate """<if_stmt>x<g>self.module<or>x<l>0<block_start><raise>ValueError("Value "+str(x)+" is not in range [0; <modulus>]")<block_end>a=self.right(x)<line_sep>n=self.module<if_stmt><not>has_sqrtmod_prime_power(a n)<block_start><return><false><block_end>ys=sqrtmod_prime_power(a n)<line_sep><return>map(<lambda>y:(x y) ys)<block_end><def_stmt>right self x<block_start>""" Right part of the curve equation: x^3 + a*x + b (mod p) """<line_sep><return>(x<power>3+self.a<times>x+self.b)%self.module<block_end><def_stmt>find_points_in_range self start=0 end=<none><block_start>""" List of points in given range for x coordinate """<line_sep>points=[]<if_stmt>end<is><none><block_start>end=self.module-1<block_end><for_stmt>x range(start end+1)<block_start>p=self.check_x(x)<if_stmt><not>p<block_start><continue><block_end>points.extend(p)<block_end><return>points<block_end><def_stmt>find_points_rand self number=1<block_start>""" List of @number random points on the curve """<line_sep>points=[]<while_stmt>len(points)<l>number<block_start>x=random.randint(0 self.module)<line_sep>p=self.check_x(x)<if_stmt><not>p<block_start><continue><block_end>points.append(p)<block_end><return>points<block_end><def_stmt>add self p1 p2<block_start>""" Sum of two points """<if_stmt>self.is_null(p1)<block_start><return>p2<block_end><if_stmt>self.is_null(p2)<block_start><return>p1<block_end><if_stmt>self.is_opposite(p1 p2)<block_start><return>NULL_POINT<block_end>x1,y1=p1<line_sep>x2,y2=p2<line_sep>l=0<if_stmt>x1<ne>x2<block_start>l=(y2-y1)<times>invmod(x2-x1 self.module)<block_end><else_stmt><block_start>l=(3<times>x1<power>2+self.a)<times>invmod(2<times>y1 self.module)<block_end>x=(l<times>l-x1-x2)%self.module<line_sep>y=(l<times>(x1-x)-y1)%self.module# yes, it's that new x <return>(x y)<block_end><def_stmt>power self p n<block_start>""" n✕P or (P + P + ... 
+ P) n times """<if_stmt>n<eq>0<or>self.is_null(p)<block_start><return>NULL_POINT<block_end>res=NULL_POINT<while_stmt>n<block_start><if_stmt>n&1<block_start>res=self.add(res p)<block_end>p=self.add(p p)<line_sep>n<augrshift>1<block_end><return>res<block_end><def_stmt>generate self n<block_start>""" Too lazy to give self.g to self.power """<line_sep><return>self.power(self.g n)<block_end><def_stmt>get_order self p limit=<none><block_start>""" Tries to calculate order of @p, returns None if @limit is reached (SLOW method) """<line_sep>order=1<line_sep>res=p<while_stmt><not>self.is_null(res)<block_start>res=self.add(res p)<line_sep>order<augadd>1<if_stmt>limit<is><not><none><and>order<ge>limit<block_start><return><none><block_end><block_end><return>order<block_end><block_end>
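# A short worked example on the toy curve y^2 = x^3 + 2x + 3 over GF(97); the parameters are chosen only for illustration, and any non-singular short Weierstrass curve over a prime field works the same way. toy=Curve(a=2 b=3 p=97)<line_sep>pts=toy.find_points_in_range(0 10)<line_sep>p=pts[0]<line_sep># Doubling through add() and through power() must agree, and the result stays on the curve doubled=toy.add(p p)<line_sep># toy.power(p 2) == doubled and toy.check(doubled) is True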
""" Functions for manipulating or otherwise processing strings """<import_stmt>base64<import_stmt>difflib<import_stmt>errno<import_stmt>fnmatch<import_stmt>logging<import_stmt>os<import_stmt>re<import_stmt>shlex<import_stmt>time<import_stmt>unicodedata<import_from_stmt>salt.utils.decorators.jinja jinja_filter<line_sep>log=logging.getLogger(__name__)<line_sep>@jinja_filter("to_bytes")<def_stmt>to_bytes s encoding=<none> errors="strict"<block_start>""" Given bytes, bytearray, str, or unicode (python 2), return bytes (str for python 2) """<if_stmt>encoding<is><none># Try utf-8 first, and fall back to detected encoding <block_start>encoding=("utf-8" __salt_system_encoding__)<block_end><if_stmt><not>isinstance(encoding (tuple list))<block_start>encoding=(encoding )<block_end><if_stmt><not>encoding<block_start><raise>ValueError("encoding cannot be empty")<block_end>exc=<none><if_stmt>isinstance(s bytes)<block_start><return>s<block_end><if_stmt>isinstance(s bytearray)<block_start><return>bytes(s)<block_end><if_stmt>isinstance(s str)<block_start><for_stmt>enc encoding<block_start><try_stmt><block_start><return>s.encode(enc errors)<block_end><except_stmt>UnicodeEncodeError<as>err<block_start>exc=err<line_sep><continue><block_end><block_end># The only way we get this far is if a UnicodeEncodeError was # raised, otherwise we would have already returned (or raised some # other exception). <raise>exc# pylint: disable=raising-bad-type <block_end><raise>TypeError("expected str, bytes, or bytearray not {}".format(type(s)))<block_end><def_stmt>to_str s encoding=<none> errors="strict" normalize=<false><block_start>""" Given str, bytes, bytearray, or unicode (py2), return str """<def_stmt>_normalize s<block_start><try_stmt><block_start><return>unicodedata.normalize("NFC" s)<if>normalize<else>s<block_end><except_stmt>TypeError<block_start><return>s<block_end><block_end><if_stmt>encoding<is><none># Try utf-8 first, and fall back to detected encoding <block_start>encoding=("utf-8" __salt_system_encoding__)<block_end><if_stmt><not>isinstance(encoding (tuple list))<block_start>encoding=(encoding )<block_end><if_stmt><not>encoding<block_start><raise>ValueError("encoding cannot be empty")<block_end><if_stmt>isinstance(s str)<block_start><return>_normalize(s)<block_end>exc=<none><if_stmt>isinstance(s (bytes bytearray))<block_start><for_stmt>enc encoding<block_start><try_stmt><block_start><return>_normalize(s.decode(enc errors))<block_end><except_stmt>UnicodeDecodeError<as>err<block_start>exc=err<line_sep><continue><block_end><block_end># The only way we get this far is if a UnicodeDecodeError was # raised, otherwise we would have already returned (or raised some # other exception). 
<raise>exc# pylint: disable=raising-bad-type <block_end><raise>TypeError("expected str, bytes, or bytearray not {}".format(type(s)))<block_end><def_stmt>to_unicode s encoding=<none> errors="strict" normalize=<false><block_start>""" Given str or unicode, return unicode (str for python 3) """<def_stmt>_normalize s<block_start><return>unicodedata.normalize("NFC" s)<if>normalize<else>s<block_end><if_stmt>encoding<is><none># Try utf-8 first, and fall back to detected encoding <block_start>encoding=("utf-8" __salt_system_encoding__)<block_end><if_stmt><not>isinstance(encoding (tuple list))<block_start>encoding=(encoding )<block_end><if_stmt><not>encoding<block_start><raise>ValueError("encoding cannot be empty")<block_end><if_stmt>isinstance(s str)<block_start><return>_normalize(s)<block_end><elif_stmt>isinstance(s (bytes bytearray))<block_start><return>_normalize(to_str(s encoding errors))<block_end><raise>TypeError("expected str, bytes, or bytearray not {}".format(type(s)))<block_end>@jinja_filter("str_to_num")@jinja_filter("to_num")<def_stmt>to_num text<block_start>""" Convert a string to a number. Returns an integer if the string represents an integer, a floating point number if the string is a real number, or the string unchanged otherwise. """<try_stmt><block_start><return>int(text)<block_end><except_stmt>ValueError<block_start><try_stmt><block_start><return>float(text)<block_end><except_stmt>ValueError<block_start><return>text<block_end><block_end><block_end><def_stmt>to_none text<block_start>""" Convert a string to None if the string is empty or contains only spaces. """<if_stmt>str(text).strip()<block_start><return>text<block_end><return><none><block_end><def_stmt>is_quoted value<block_start>""" Return a single or double quote, if a string is wrapped in extra quotes. Otherwise return an empty string. """<line_sep>ret=""<if_stmt>(isinstance(value str)<and>value[0]<eq>value[-1]<and>value.startswith(("'" '"')))<block_start>ret=value[0]<block_end><return>ret<block_end><def_stmt>dequote value<block_start>""" Remove extra quotes around a string. """<if_stmt>is_quoted(value)<block_start><return>value[1:-1]<block_end><return>value<block_end>@jinja_filter("is_hex")<def_stmt>is_hex value<block_start>""" Returns True if value is a hexadecimal string, otherwise returns False """<try_stmt><block_start>int(value 16)<line_sep><return><true><block_end><except_stmt>(TypeError ValueError)<block_start><return><false><block_end><block_end><def_stmt>is_binary data<block_start>""" Detects if the passed string of data is binary or text """<if_stmt><not>data<or><not>isinstance(data ((str ) bytes))<block_start><return><false><block_end><if_stmt>isinstance(data bytes)<block_start><if_stmt>b"\0"<in>data<block_start><return><true><block_end><block_end><elif_stmt>"\0"<in>data<block_start><return><true><block_end>text_characters="".join([chr(x)<for>x range(32 127)]+list("\n\r\t\b"))<line_sep># Get the non-text characters (map each character to itself then use the # 'remove' option to get rid of the text characters.) 
<if_stmt>isinstance(data bytes)<block_start><import_stmt>salt.utils.data<line_sep>nontext=data.translate(<none> salt.utils.data.encode(text_characters))<block_end><else_stmt><block_start>trans="".maketrans("" "" text_characters)<line_sep>nontext=data.translate(trans)<block_end># If more than 30% non-text characters, then # this is considered binary data <if_stmt>float(len(nontext))/len(data)<g>0.30<block_start><return><true><block_end><return><false><block_end>@jinja_filter("random_str")<def_stmt>random size=32<block_start>key=os.urandom(size)<line_sep><return>to_unicode(base64.b64encode(key).replace(b"\n" b"")[:size])<block_end>@jinja_filter("contains_whitespace")<def_stmt>contains_whitespace text<block_start>""" Returns True if there are any whitespace characters in the string """<line_sep><return>any(x.isspace()<for>x text)<block_end><def_stmt>human_to_bytes size<block_start>""" Given a human-readable byte string (e.g. 2G, 30M), return the number of bytes. Will return 0 if the argument has unexpected form. .. versionadded:: 2018.3.0 """<line_sep>sbytes=size[:-1]<line_sep>unit=size[-1]<if_stmt>sbytes.isdigit()<block_start>sbytes=int(sbytes)<if_stmt>unit<eq>"P"<block_start>sbytes<augmul>1125899906842624<block_end><elif_stmt>unit<eq>"T"<block_start>sbytes<augmul>1099511627776<block_end><elif_stmt>unit<eq>"G"<block_start>sbytes<augmul>1073741824<block_end><elif_stmt>unit<eq>"M"<block_start>sbytes<augmul>1048576<block_end><else_stmt><block_start>sbytes=0<block_end><block_end><else_stmt><block_start>sbytes=0<block_end><return>sbytes<block_end><def_stmt>build_whitespace_split_regex text<block_start>''' Create a regular expression at runtime which should match ignoring the addition or deletion of white space or line breaks, unless between commas Example: .. code-block:: python >>> import re >>> import salt.utils.stringutils >>> regex = salt.utils.stringutils.build_whitespace_split_regex( ... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then""" ... ) >>> regex '(?:[\\s]+)?if(?:[\\s]+)?\\[(?:[\\s]+)?\\-z(?:[\\s]+)?\\"\\$debian' '\\_chroot\\"(?:[\\s]+)?\\](?:[\\s]+)?\\&\\&(?:[\\s]+)?\\[(?:[\\s]+)?' '\\-r(?:[\\s]+)?\\/etc\\/debian\\_chroot(?:[\\s]+)?\\]\\;(?:[\\s]+)?' 'then(?:[\\s]+)?' >>> re.search( ... regex, ... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then""" ... ) <_sre.SRE_Match object at 0xb70639c0> >>> '''<def_stmt>__build_parts text<block_start>lexer=shlex.shlex(text)<line_sep>lexer.whitespace_split=<true><line_sep>lexer.commenters=""<if_stmt>r"'\""<in>text<block_start>lexer.quotes=""<block_end><elif_stmt>"'"<in>text<block_start>lexer.quotes='"'<block_end><elif_stmt>'"'<in>text<block_start>lexer.quotes="'"<block_end><return>list(lexer)<block_end>regex=r""<for_stmt>line text.splitlines()<block_start>parts=[re.escape(s)<for>s __build_parts(line)]<line_sep>regex<augadd>r"(?:[\s]+)?{}(?:[\s]+)?".format(r"(?:[\s]+)?".join(parts))<block_end><return>r"(?m)^{}$".format(regex)<block_end><def_stmt>expr_match line expr<block_start>""" Checks whether or not the passed value matches the specified expression. Tries to match expr first as a glob using fnmatch.fnmatch(), and then tries to match expr as a regular expression. Originally designed to match minion IDs for whitelists/blacklists. Note that this also does exact matches, as fnmatch.fnmatch() will return ``True`` when no glob characters are used and the string is an exact match: .. 
code-block:: python >>> fnmatch.fnmatch('foo', 'foo') True """<try_stmt><block_start><if_stmt>fnmatch.fnmatch(line expr)<block_start><return><true><block_end><try_stmt><block_start><if_stmt>re.match(r"\A{}\Z".format(expr) line)<block_start><return><true><block_end><block_end><except_stmt>re.error<block_start><pass><block_end><block_end><except_stmt>TypeError<block_start>log.exception("Value %r or expression %r is not a string" line expr)<block_end><return><false><block_end>@jinja_filter("check_whitelist_blacklist")<def_stmt>check_whitelist_blacklist value whitelist=<none> blacklist=<none><block_start>""" Check a whitelist and/or blacklist to see if the value matches it. value The item to check the whitelist and/or blacklist against. whitelist The list of items that are white-listed. If ``value`` is found in the whitelist, then the function returns ``True``. Otherwise, it returns ``False``. blacklist The list of items that are black-listed. If ``value`` is found in the blacklist, then the function returns ``False``. Otherwise, it returns ``True``. If both a whitelist and a blacklist are provided, value membership in the blacklist will be examined first. If the value is not found in the blacklist, then the whitelist is checked. If the value isn't found in the whitelist, the function returns ``False``. """<line_sep># Normalize the input so that we have a list <if_stmt>blacklist<block_start><if_stmt>isinstance(blacklist str)<block_start>blacklist=[blacklist]<block_end><if_stmt><not>hasattr(blacklist "__iter__")<block_start><raise>TypeError("Expecting iterable blacklist, but got {} ({})".format(type(blacklist).__name__ blacklist))<block_end><block_end><else_stmt><block_start>blacklist=[]<block_end><if_stmt>whitelist<block_start><if_stmt>isinstance(whitelist str)<block_start>whitelist=[whitelist]<block_end><if_stmt><not>hasattr(whitelist "__iter__")<block_start><raise>TypeError("Expecting iterable whitelist, but got {} ({})".format(type(whitelist).__name__ whitelist))<block_end><block_end><else_stmt><block_start>whitelist=[]<block_end>_blacklist_match=any(expr_match(value expr)<for>expr blacklist)<line_sep>_whitelist_match=any(expr_match(value expr)<for>expr whitelist)<if_stmt>blacklist<and><not>whitelist# Blacklist but no whitelist <block_start><return><not>_blacklist_match<block_end><elif_stmt>whitelist<and><not>blacklist# Whitelist but no blacklist <block_start><return>_whitelist_match<block_end><elif_stmt>blacklist<and>whitelist# Both whitelist and blacklist <block_start><return><not>_blacklist_match<and>_whitelist_match<block_end><else_stmt># No blacklist or whitelist passed <block_start><return><true><block_end><block_end><def_stmt>check_include_exclude path_str include_pat=<none> exclude_pat=<none><block_start>""" Check for glob or regexp patterns for include_pat and exclude_pat in the 'path_str' string and return True/False conditions as follows. 
- Default: return 'True' if no include_pat or exclude_pat patterns are supplied - If only include_pat or exclude_pat is supplied: return 'True' if string passes the include_pat test or fails exclude_pat test respectively - If both include_pat and exclude_pat are supplied: return 'True' if include_pat matches AND exclude_pat does not match """<def_stmt>_pat_check path_str check_pat<block_start><if_stmt>re.match("E@" check_pat)<block_start><return><true><if>re.search(check_pat[2:] path_str)<else><false><block_end><else_stmt><block_start><return><true><if>fnmatch.fnmatch(path_str check_pat)<else><false><block_end><block_end>ret=<true># -- default true # Before pattern match, check if it is regexp (E@'') or glob(default) <if_stmt>include_pat<block_start><if_stmt>isinstance(include_pat list)<block_start><for_stmt>include_line include_pat<block_start>retchk_include=_pat_check(path_str include_line)<if_stmt>retchk_include<block_start><break><block_end><block_end><block_end><else_stmt><block_start>retchk_include=_pat_check(path_str include_pat)<block_end><block_end><if_stmt>exclude_pat<block_start><if_stmt>isinstance(exclude_pat list)<block_start><for_stmt>exclude_line exclude_pat<block_start>retchk_exclude=<not>_pat_check(path_str exclude_line)<if_stmt><not>retchk_exclude<block_start><break><block_end><block_end><block_end><else_stmt><block_start>retchk_exclude=<not>_pat_check(path_str exclude_pat)<block_end><block_end># Now apply include/exclude conditions <if_stmt>include_pat<and><not>exclude_pat<block_start>ret=retchk_include<block_end><elif_stmt>exclude_pat<and><not>include_pat<block_start>ret=retchk_exclude<block_end><elif_stmt>include_pat<and>exclude_pat<block_start>ret=retchk_include<and>retchk_exclude<block_end><else_stmt><block_start>ret=<true><block_end><return>ret<block_end><def_stmt>print_cli msg retries=10 step=0.01<block_start>""" Wrapper around print() that suppresses tracebacks on broken pipes (i.e. when salt output is piped to less and less is stopped prematurely). """<while_stmt>retries<block_start><try_stmt><block_start><try_stmt><block_start>print(msg)<block_end><except_stmt>UnicodeEncodeError<block_start>print(msg.encode("utf-8"))<block_end><block_end><except_stmt>OSError<as>exc<block_start>err="{}".format(exc)<if_stmt>exc.errno<ne>errno.EPIPE<block_start><if_stmt>("temporarily unavailable"<in>err<or>exc.errno<in>(errno.EAGAIN ))<and>retries<block_start>time.sleep(step)<line_sep>retries<augsub>1<line_sep><continue><block_end><else_stmt><block_start><raise><block_end><block_end><block_end><break><block_end><block_end><def_stmt>get_context template line num_lines=5 marker=<none><block_start>""" Returns debugging context around a line in a given string Returns:: string """<line_sep>template_lines=template.splitlines()<line_sep>num_template_lines=len(template_lines)<line_sep># In test mode, a single line template would return a crazy line number like, # 357. 
Do this sanity check and if the given line is obviously wrong, just # return the entire template <if_stmt>line<g>num_template_lines<block_start><return>template<block_end>context_start=max(0 line-num_lines-1)# subtract 1 for 0-based indexing context_end=min(num_template_lines line+num_lines)<line_sep>error_line_in_context=line-context_start-1# subtract 1 for 0-based index buf=[]<if_stmt>context_start<g>0<block_start>buf.append("[...]")<line_sep>error_line_in_context<augadd>1<block_end>buf.extend(template_lines[context_start:context_end])<if_stmt>context_end<l>num_template_lines<block_start>buf.append("[...]")<block_end><if_stmt>marker<block_start>buf[error_line_in_context]<augadd>marker<block_end><return>"---\n{}\n---".format("\n".join(buf))<block_end><def_stmt>get_diff a b *args **kwargs<block_start>""" Perform diff on two iterables containing lines from two files, and return the diff as a string. Lines are normalized to str types to avoid issues with unicode on PY2. """<line_sep>encoding=("utf-8" "latin-1" __salt_system_encoding__)<line_sep># Late import to avoid circular import <import_stmt>salt.utils.data<line_sep><return>"".join(difflib.unified_diff(salt.utils.data.decode_list(a encoding=encoding) salt.utils.data.decode_list(b encoding=encoding) *args **kwargs))<block_end>@jinja_filter("to_snake_case")<def_stmt>camel_to_snake_case camel_input<block_start>""" Converts camelCase (or CamelCase) to snake_case. From https://codereview.stackexchange.com/questions/185966/functions-to-convert-camelcase-strings-to-snake-case :param str camel_input: The camelcase or CamelCase string to convert to snake_case :return str """<line_sep>res=camel_input[0].lower()<for_stmt>i,letter enumerate(camel_input[1:] 1)<block_start><if_stmt>letter.isupper()<block_start><if_stmt>camel_input[i-1].islower()<or>(i<ne>len(camel_input)-1<and>camel_input[i+1].islower())<block_start>res<augadd>"_"<block_end><block_end>res<augadd>letter.lower()<block_end><return>res<block_end>@jinja_filter("to_camelcase")<def_stmt>snake_to_camel_case snake_input uppercamel=<false><block_start>""" Converts snake_case to camelCase (or CamelCase if uppercamel is ``True``). Inspired by https://codereview.stackexchange.com/questions/85311/transform-snake-case-to-camelcase :param str snake_input: The input snake_case string to convert to camelCase :param bool uppercamel: Whether or not to convert to CamelCase instead :return str """<line_sep>words=snake_input.split("_")<if_stmt>uppercamel<block_start>words[0]=words[0].capitalize()<block_end><return>words[0]+"".join(word.capitalize()<for>word words[1:])<block_end>
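# --- Added illustrative sketch (not part of the original file) ---
# Minimal usage example for the string helpers above, assuming they are
# importable from salt.utils.stringutils as in upstream Salt; the sample
# values are illustrative only.
from salt.utils.stringutils import (
    check_whitelist_blacklist, human_to_bytes, is_hex, to_num)

assert to_num("42") == 42          # integer string -> int
assert to_num("4.2") == 4.2        # real-number string -> float
assert to_num("n/a") == "n/a"      # anything else is returned unchanged
assert is_hex("deadbeef") is True
assert human_to_bytes("2G") == 2 * 1073741824
# When both lists are given, a blacklist hit wins over a whitelist hit:
assert check_whitelist_blacklist(
    "minion1", whitelist=["minion*"], blacklist=["minion1"]) is False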
<import_from_stmt>collections OrderedDict<import_stmt>uuid<import_from_stmt>typing List<import_from_stmt>Bot.EntryExitSettings EntryExitSettings<import_from_stmt>Bot.TradeEnums Side<import_from_stmt>Bot.StopLossSettings StopLossSettings<import_from_stmt>Bot.Target *<class_stmt>Trade(CustomSerializable)# def __init__(self, symbol, side, asset, status=None, sl_settings=None, entry=None, exit=None): <block_start><def_stmt>__init__ self symbol side asset status=<none> *args **kvargs<block_start>self.side=Side(side.lower())<line_sep>self.symbol=symbol.upper()<line_sep>self.asset=asset.upper()<line_sep>self.entry:EntryExitSettings=<none><line_sep>self.exit:EntryExitSettings=<none><line_sep>self._init_entry_exit(<true> kvargs.get('entry') self.side)<line_sep>self._init_entry_exit(<false> kvargs.get('exit') self.side)<line_sep>sl_settings=kvargs.get('stoploss' kvargs.get('sl_settings'))<line_sep># self.sl_settings: StopLossSettings = StopLossSettings(**sl_settings) if sl_settings else None self.sl_settings:StopLossSettings=StopLossSettings(**sl_settings)<if>sl_settings<else>StopLossSettings({})<if_stmt>status<block_start>self.status=OrderStatus(status.lower())<block_end><else_stmt><block_start>self.status=OrderStatus.ACTIVE<if><not>kvargs.get('entry')<else>OrderStatus.NEW<block_end>self.cap=float(kvargs.get('cap'))<if>kvargs.get('cap')<else><none><line_sep>self.id=kvargs.get('id' <none>)<if_stmt><not>self.id<block_start>self.id=str(uuid.uuid4())<block_end><block_end><def_stmt>_init_entry_exit self is_entry data side:Side<block_start><if_stmt>data<block_start><if_stmt>'side'<not><in>data<block_start>data['side']=(side.reverse()<if>is_entry<else>side).value<block_end># TODO: right now there is only Smart Entry option allowed <if_stmt>is_entry<block_start>data['smart']=<true><line_sep>self.entry=EntryExitSettings(is_entry=is_entry **data)<block_end><else_stmt><block_start>self.exit=EntryExitSettings(is_entry=is_entry **data)<block_end><block_end><block_end><def_stmt>get_cap self available_balance<block_start><return>min(self.cap<if>self.cap<else>available_balance available_balance)<block_end><def_stmt>is_sell self<block_start><return>self.side.is_sell()<block_end><def_stmt>has_entry self<block_start><return>self.entry<is><not><none><block_end><def_stmt>has_exit self<block_start><return>self.exit<is><not><none><block_end><def_stmt>has_stoploss self<block_start><return>self.sl_settings<is><not><none><and>self.sl_settings.initial_target<block_end><def_stmt>has_stoploss_in_last_completed_target self<block_start>completed_targets=self.get_completed_exit_targets()<line_sep>has_completed_targets=len(completed_targets)<g>0<line_sep><return>has_completed_targets<and>completed_targets[-1].has_custom_stop()<block_end><def_stmt>get_completed_exit_targets self<arrow>List[Target]<block_start><if_stmt><not>self.exit<block_start><return>[]<block_end><return>self.exit.get_completed_targets()<block_end><def_stmt>get_initial_stop self<arrow>Target<block_start><if_stmt>self.sl_settings<block_start><return>self.sl_settings.initial_target<block_end><return><none><block_end><def_stmt>serializable_dict 
self<block_start>d=OrderedDict()<line_sep>d['id']=self.id<line_sep>d['asset']=self.asset<line_sep>d['symbol']=self.symbol<line_sep>d['side']=self.side<line_sep>d['status']=self.status<if_stmt>self.cap<block_start>d['cap']=self.format_float(self.cap)<block_end><if_stmt>self.entry<block_start>d['entry']=self.entry<block_end><if_stmt>self.exit<block_start>d['exit']=self.exit<block_end><if_stmt>self.sl_settings<block_start>d['stoploss']=self.sl_settings<block_end><return>d<block_end><def_stmt>get_all_active_placed_targets self<arrow>List[Target]<block_start>tgt=[]<if_stmt>self.has_exit()<block_start>tgt.extend(self.exit.targets)<block_end><if_stmt>self.has_entry()<block_start>tgt.extend(self.entry.targets)<block_end><if_stmt>self.has_stoploss()<block_start>tgt.append(self.sl_settings.initial_target)<block_end><return>[t<for>t tgt<if><not>t.is_completed()<and>t.has_id()]<block_end><def_stmt>is_completed self<block_start><return>self.status.is_completed()<block_end><def_stmt>is_active self<block_start><return>self.status.is_active()<block_end><def_stmt>is_new self<block_start><return>self.status.is_new()<block_end><def_stmt>is_removed self<block_start><return>self.status.is_removed()<block_end><def_stmt>set_active self<block_start>self.status=OrderStatus.ACTIVE<block_end><def_stmt>set_completed self<block_start>self.status=OrderStatus.COMPLETED<block_end><def_stmt>set_removed self<block_start>self.status=OrderStatus.REMOVED<block_end><def_stmt>__str__ self<block_start><return>'{}({}): {}'.format(self.symbol self.id self.side)<block_end><def_stmt>describe self<block_start>description=self.__str__()<if_stmt>self.has_entry()<block_start>description<augadd>'\n'+self.entry.describe()<block_end><if_stmt>self.has_exit()<block_start>description<augadd>'\n'+self.exit.describe()<block_end><if_stmt>self.has_stoploss()<block_start>description<augadd>'\n'+self.sl_settings.describe()<block_end><return>description<block_end><block_end>
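# --- Added illustrative sketch (not part of the original file) ---
# Minimal construction example for the Trade class above. The import path
# is an assumption (the class lives somewhere under the Bot package) and
# the symbol/cap values are illustrative.
from Bot.Trade import Trade  # assumed module path

trade = Trade('BTCUSDT', 'sell', 'BTC', cap=0.5)
print(trade)               # e.g. "BTCUSDT(<uuid>): Side.SELL"
print(trade.get_cap(1.0))  # capped at min(cap, available balance) -> 0.5
print(trade.is_active())   # True: no entry was given, so status is ACTIVE
print(trade.describe())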
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_from_stmt>oslo_log log<as>logging<import_from_stmt>tacker.common log<line_sep>LOG=logging.getLogger(__name__)<class_stmt>HOTUpdater(object)<block_start>"""Update HOT template."""<def_stmt>__init__ self heatclient<block_start>self.heatclient=heatclient<line_sep>self.template={}<line_sep>self.nested_templates=dict()<block_end>@log.log<def_stmt>get_templates_from_stack self stack_id<block_start>"""Get template information from the stack. Get the template from the stack specified by stack_id; if the stack has a scalable resource, also get its child template. """<def_stmt>_get_resource name resources<block_start><for_stmt>resource resources<block_start><if_stmt>resource.resource_name<eq>name<block_start><return>resource<block_end><block_end><block_end>self.template=self.heatclient.stacks.template(stack_id)<line_sep>LOG.debug('got main template for stack({}). template={}'.format(stack_id self.template))<line_sep>stack_resources=self.heatclient.resource_get_list(stack_id nested_depth=2)<for_stmt>resource stack_resources<block_start><if_stmt>resource.resource_type<eq>'OS::Heat::AutoScalingGroup'<block_start>intermediate_template=self.heatclient.stacks.template(resource.physical_resource_id)<for_stmt>resource_id intermediate_template['resources'].keys()<block_start>corresponding_resource=_get_resource(resource_id stack_resources)<line_sep>nested_template=self.heatclient.stacks.template(corresponding_resource.physical_resource_id)<line_sep>LOG.debug('got nested template for stack({}). template={}'.format(corresponding_resource.physical_resource_id nested_template))<block_end><if_stmt>nested_template<block_start>self.nested_templates[corresponding_resource.resource_type]=nested_template<block_end><block_end><block_end><block_end>@log.log<def_stmt>update_resource_property self resource_id resource_types=[] **kwargs<block_start>"""Update attributes of resource properties. Get the resource information from the template's resources section, and update the properties using the kwargs information. If the resource type is not included in resource_types, nothing is done. """<def_stmt>_update template resource_id resource_types kwargs<block_start>resource=template.get('resources' {}).get(resource_id)<if_stmt><not>resource<block_start><return><block_end><if_stmt>resource.get('type' {})<not><in>resource_types<block_start><return><block_end>resource_properties=resource.get('properties' {})<if_stmt><not>resource_properties<block_start><return><block_end><for_stmt>key,value kwargs.items()<block_start><if_stmt>value<is><not><none><block_start>resource_properties.update({key:value})<block_end><elif_stmt>resource_properties.get(key)<block_start><del_stmt>resource_properties[key]<block_end><block_end><block_end>_update(self.template resource_id resource_types kwargs)<for_stmt>value self.nested_templates.values()<block_start>nested_template=value<line_sep>_update(nested_template resource_id resource_types kwargs)<block_end><block_end><block_end>
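# --- Added illustrative sketch (not part of the original file) ---
# Exercises update_resource_property() on an in-memory template. No real
# Heat client is needed for the update logic itself, so None is passed for
# heatclient; the resource name and property values are illustrative.
updater = HOTUpdater(heatclient=None)
updater.template = {
    'resources': {
        'VDU1': {
            'type': 'OS::Nova::Server',
            'properties': {'image': 'cirros', 'flavor': 'm1.tiny'},
        },
    },
}
# A non-None kwarg replaces the property; a None kwarg deletes it:
updater.update_resource_property(
    'VDU1', resource_types=['OS::Nova::Server'],
    flavor='m1.small', image=None)
print(updater.template['resources']['VDU1']['properties'])
# -> {'flavor': 'm1.small'}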
<import_from_stmt>copy deepcopy<import_stmt>math<import_stmt>itertools<import_stmt>numpy<as>np<import_from_stmt>skmultiflow.core BaseSKMObject ClassifierMixin MetaEstimatorMixin<import_from_stmt>skmultiflow.drift_detection.base_drift_detector BaseDriftDetector<import_from_stmt>skmultiflow.drift_detection ADWIN<import_from_stmt>skmultiflow.trees.arf_hoeffding_tree ARFHoeffdingTreeClassifier<import_from_stmt>skmultiflow.metrics ClassificationPerformanceEvaluator<import_from_stmt>skmultiflow.utils get_dimensions normalize_values_in_dict check_random_state check_weights<import_stmt>warnings<def_stmt>AdaptiveRandomForest n_estimators=10 max_features='auto' disable_weighted_vote=<false> lambda_value=6 performance_metric='acc' drift_detection_method:BaseDriftDetector=ADWIN(0.001) warning_detection_method:BaseDriftDetector=ADWIN(0.01) max_byte_size=33554432 memory_estimate_period=2000000 grace_period=50 split_criterion='info_gain' split_confidence=0.01 tie_threshold=0.05 binary_split=<false> stop_mem_management=<false> remove_poor_atts=<false> no_preprune=<false> leaf_prediction='nba' nb_threshold=0 nominal_attributes=<none> random_state=<none># pragma: no cover <block_start>warnings.warn("'AdaptiveRandomForest' has been renamed to 'AdaptiveRandomForestClassifier' "<concat>"in v0.5.0.\nThe old name will be removed in v0.7.0" category=FutureWarning)<line_sep><return>AdaptiveRandomForestClassifier(n_estimators=n_estimators max_features=max_features disable_weighted_vote=disable_weighted_vote lambda_value=lambda_value performance_metric=performance_metric drift_detection_method=drift_detection_method warning_detection_method=warning_detection_method max_byte_size=max_byte_size memory_estimate_period=memory_estimate_period grace_period=grace_period split_criterion=split_criterion split_confidence=split_confidence tie_threshold=tie_threshold binary_split=binary_split stop_mem_management=stop_mem_management remove_poor_atts=remove_poor_atts no_preprune=no_preprune leaf_prediction=leaf_prediction nb_threshold=nb_threshold nominal_attributes=nominal_attributes random_state=random_state)<block_end><class_stmt>AdaptiveRandomForestClassifier(BaseSKMObject ClassifierMixin MetaEstimatorMixin)<block_start>"""Adaptive Random Forest classifier. Parameters ---------- n_estimators: int, optional (default=10) Number of trees in the ensemble. max_features : int, float, string or None, optional (default="auto") Max number of attributes for each node split. - If int, then consider ``max_features`` features at each split. - If float, then ``max_features`` is a percentage and ``int(max_features * n_features)`` features are considered at each split. - If "auto", then ``max_features=sqrt(n_features)``. - If "sqrt", then ``max_features=sqrt(n_features)`` (same as "auto"). - If "log2", then ``max_features=log2(n_features)``. - If None, then ``max_features=n_features``. disable_weighted_vote: bool, optional (default=False) Weighted vote option. lambda_value: int, optional (default=6) The lambda value for bagging (lambda=6 corresponds to Leverage Bagging). performance_metric: string, optional (default="acc") Metric used to track trees performance within the ensemble. - 'acc' - Accuracy - 'kappa' - Kappa drift_detection_method: BaseDriftDetector or None, optional (default=ADWIN(0.001)) Drift Detection method. Set to None to disable Drift detection. warning_detection_method: BaseDriftDetector or None, default(ADWIN(0.01)) Warning Detection method. Set to None to disable warning detection. 
max_byte_size: int, optional (default=33554432) (`ARFHoeffdingTreeClassifier` parameter) Maximum memory consumed by the tree. memory_estimate_period: int, optional (default=2000000) (`ARFHoeffdingTreeClassifier` parameter) Number of instances between memory consumption checks. grace_period: int, optional (default=50) (`ARFHoeffdingTreeClassifier` parameter) Number of instances a leaf should observe between split attempts. split_criterion: string, optional (default='info_gain') (`ARFHoeffdingTreeClassifier` parameter) Split criterion to use. - 'gini' - Gini - 'info_gain' - Information Gain split_confidence: float, optional (default=0.01) (`ARFHoeffdingTreeClassifier` parameter) Allowed error in split decision, a value closer to 0 takes longer to decide. tie_threshold: float, optional (default=0.05) (`ARFHoeffdingTreeClassifier` parameter) Threshold below which a split will be forced to break ties. binary_split: bool, optional (default=False) (`ARFHoeffdingTreeClassifier` parameter) If True, only allow binary splits. stop_mem_management: bool, optional (default=False) (`ARFHoeffdingTreeClassifier` parameter) If True, stop growing as soon as memory limit is hit. remove_poor_atts: bool, optional (default=False) (`ARFHoeffdingTreeClassifier` parameter) If True, disable poor attributes. no_preprune: bool, optional (default=False) (`ARFHoeffdingTreeClassifier` parameter) If True, disable pre-pruning. leaf_prediction: string, optional (default='nba') (`ARFHoeffdingTreeClassifier` parameter) Prediction mechanism used at leaves. - 'mc' - Majority Class - 'nb' - Naive Bayes - 'nba' - Naive Bayes Adaptive nb_threshold: int, optional (default=0) (`ARFHoeffdingTreeClassifier` parameter) Number of instances a leaf should observe before allowing Naive Bayes. nominal_attributes: list, optional (`ARFHoeffdingTreeClassifier` parameter) List of Nominal attributes. If empty, then assume that all attributes are numerical. random_state: int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Notes ----- The 3 most important aspects of Adaptive Random Forest [1]_ are: (1) inducing diversity through re-sampling; (2) inducing diversity through randomly selecting subsets of features for node splits (see skmultiflow.classification.trees.arf_hoeffding_tree); (3) drift detectors per base tree, which cause selective resets in response to drifts. It also allows training background trees, which start training if a warning is detected and replace the active tree if the warning escalates to a drift. References ---------- .. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Adaptive random forests for evolving data stream classification. In Machine Learning, DOI: 10.1007/s10994-017-5642-8, Springer, 2017. 
Examples -------- >>> # Imports >>> from skmultiflow.data import SEAGenerator >>> from skmultiflow.meta import AdaptiveRandomForestClassifier >>> >>> # Setting up a data stream >>> stream = SEAGenerator(random_state=1) >>> >>> # Setup Adaptive Random Forest Classifier >>> arf = AdaptiveRandomForestClassifier() >>> >>> # Setup variables to control loop and track performance >>> n_samples = 0 >>> correct_cnt = 0 >>> max_samples = 200 >>> >>> # Train the estimator with the samples provided by the data stream >>> while n_samples < max_samples and stream.has_more_samples(): >>> X, y = stream.next_sample() >>> y_pred = arf.predict(X) >>> if y[0] == y_pred[0]: >>> correct_cnt += 1 >>> arf.partial_fit(X, y) >>> n_samples += 1 >>> >>> # Display results >>> print('Adaptive Random Forest ensemble classifier example') >>> print('{} samples analyzed.'.format(n_samples)) >>> print('Accuracy: {}'.format(correct_cnt / n_samples)) """<def_stmt>__init__ self n_estimators=10 max_features='auto' disable_weighted_vote=<false> lambda_value=6 performance_metric='acc' drift_detection_method:BaseDriftDetector=ADWIN(0.001) warning_detection_method:BaseDriftDetector=ADWIN(0.01) max_byte_size=33554432 memory_estimate_period=2000000 grace_period=50 split_criterion='info_gain' split_confidence=0.01 tie_threshold=0.05 binary_split=<false> stop_mem_management=<false> remove_poor_atts=<false> no_preprune=<false> leaf_prediction='nba' nb_threshold=0 nominal_attributes=<none> random_state=<none><block_start>"""AdaptiveRandomForestClassifier class constructor."""<line_sep>super().__init__()<line_sep>self.n_estimators=n_estimators<line_sep>self.max_features=max_features<line_sep>self.disable_weighted_vote=disable_weighted_vote<line_sep>self.lambda_value=lambda_value<if_stmt>isinstance(drift_detection_method BaseDriftDetector)<block_start>self.drift_detection_method=drift_detection_method<block_end><else_stmt><block_start>self.drift_detection_method=<none><block_end><if_stmt>isinstance(warning_detection_method BaseDriftDetector)<block_start>self.warning_detection_method=warning_detection_method<block_end><else_stmt><block_start>self.warning_detection_method=<none><block_end>self.instances_seen=0<line_sep>self.classes=<none><line_sep>self._train_weight_seen_by_model=0.0<line_sep>self.ensemble=<none><line_sep>self.random_state=random_state<line_sep>self._random_state=check_random_state(self.random_state)# Actual random_state object <if_stmt>performance_metric<in>['acc' 'kappa']<block_start>self.performance_metric=performance_metric<block_end><else_stmt><block_start><raise>ValueError('Invalid performance metric: {}'.format(performance_metric))<block_end># ARF Hoeffding Tree configuration self.max_byte_size=max_byte_size<line_sep>self.memory_estimate_period=memory_estimate_period<line_sep>self.grace_period=grace_period<line_sep>self.split_criterion=split_criterion<line_sep>self.split_confidence=split_confidence<line_sep>self.tie_threshold=tie_threshold<line_sep>self.binary_split=binary_split<line_sep>self.stop_mem_management=stop_mem_management<line_sep>self.remove_poor_atts=remove_poor_atts<line_sep>self.no_preprune=no_preprune<line_sep>self.leaf_prediction=leaf_prediction<line_sep>self.nb_threshold=nb_threshold<line_sep>self.nominal_attributes=nominal_attributes<block_end><def_stmt>partial_fit self X y classes=<none> sample_weight=<none><block_start>""" Partially (incrementally) fit the model. Parameters ---------- X : numpy.ndarray of shape (n_samples, n_features) The features to train the model. 
y: numpy.ndarray of shape (n_samples) An array-like with the class labels of all samples in X. classes: numpy.ndarray, list, optional (default=None) Array with all possible/known class labels. This is an optional parameter, except for the first partial_fit call where it is compulsory. sample_weight: numpy.ndarray of shape (n_samples), optional (default=None) Samples weight. If not provided, uniform weights are assumed. Returns ------- self """<if_stmt>self.classes<is><none><and>classes<is><not><none><block_start>self.classes=classes<block_end><if_stmt>sample_weight<is><none><block_start>weight=1.0<block_end><else_stmt><block_start>weight=sample_weight<block_end><if_stmt>y<is><not><none><block_start>row_cnt,_=get_dimensions(X)<line_sep>weight=check_weights(weight expand_length=row_cnt)<for_stmt>i range(row_cnt)<block_start><if_stmt>weight[i]<ne>0.0<block_start>self._train_weight_seen_by_model<augadd>weight[i]<line_sep>self._partial_fit(X[i] y[i] self.classes weight[i])<block_end><block_end><block_end><return>self<block_end><def_stmt>_partial_fit self X y classes=<none> sample_weight=1.0<block_start>self.instances_seen<augadd>1<if_stmt>self.ensemble<is><none><block_start>self._init_ensemble(X)<block_end><for_stmt>i range(self.n_estimators)<block_start>y_predicted=self.ensemble[i].predict(np.asarray([X]))<line_sep>self.ensemble[i].evaluator.add_result(y_predicted y sample_weight)<line_sep>k=self._random_state.poisson(self.lambda_value)<if_stmt>k<g>0<block_start>self.ensemble[i].partial_fit(np.asarray([X]) np.asarray([y]) classes=classes sample_weight=np.asarray([k]) instances_seen=self.instances_seen)<block_end><block_end><block_end><def_stmt>predict self X<block_start>""" Predict classes for the passed data. Parameters ---------- X : numpy.ndarray of shape (n_samples, n_features) The set of data samples to predict the class labels for. Returns ------- A numpy.ndarray with all the predictions for the samples in X. """<line_sep>y_proba=self.predict_proba(X)<line_sep>n_rows=y_proba.shape[0]<line_sep>y_pred=np.zeros(n_rows dtype=int)<for_stmt>i range(n_rows)<block_start>index=np.argmax(y_proba[i])<line_sep>y_pred[i]=index<block_end><return>y_pred<block_end><def_stmt>predict_proba self X<block_start>""" Estimates the probability of each sample in X belonging to each of the class-labels. Class probabilities are calculated as the mean predicted class probabilities per base estimator. Parameters ---------- X: numpy.ndarray of shape (n_samples, n_features) Samples for which we want to predict the class probabilities. Returns ------- numpy.ndarray of shape (n_samples, n_classes) Predicted class probabilities for all instances in X. If class labels were specified in a `partial_fit` call, the order of the columns matches `self.classes`. If classes were not specified, they are assumed to be 0-indexed. Class probabilities for a sample shall sum to 1 as long as at least one estimator has non-zero predictions. If no estimator can predict probabilities, probabilities of 0 are returned. 
"""<if_stmt>self.ensemble<is><none><block_start>self._init_ensemble(X)<block_end>r,_=get_dimensions(X)<line_sep>y_proba=[]<for_stmt>i range(r)<block_start>votes=deepcopy(self._get_votes_for_instance(X[i]))<if_stmt>votes<eq>{}# Estimator is empty, all classes equal, default to zero <block_start>y_proba.append([0])<block_end><else_stmt><block_start><if_stmt>sum(votes.values())<ne>0<block_start>votes=normalize_values_in_dict(votes)<block_end><if_stmt>self.classes<is><not><none><block_start>votes_array=np.zeros(int(max(self.classes))+1)<block_end><else_stmt><block_start>votes_array=np.zeros(int(max(votes.keys()))+1)<block_end><for_stmt>key,value votes.items()<block_start>votes_array[int(key)]=value<block_end>y_proba.append(votes_array)<block_end><block_end># Set result as np.array <if_stmt>self.classes<is><not><none><block_start>y_proba=np.asarray(y_proba)<block_end><else_stmt># Fill missing values related to unobserved classes to ensure we get a 2D array <block_start>y_proba=np.asarray(list(itertools.zip_longest(*y_proba fillvalue=0.0))).T<block_end><return>y_proba<block_end><def_stmt>reset self<block_start>"""Reset ARF."""<line_sep>self.ensemble=<none><line_sep>self.instances_seen=0<line_sep>self._train_weight_seen_by_model=0.0<line_sep>self._random_state=check_random_state(self.random_state)<block_end><def_stmt>_get_votes_for_instance self X<block_start><if_stmt>self.ensemble<is><none><block_start>self._init_ensemble(X)<block_end>combined_votes={}<for_stmt>i range(self.n_estimators)<block_start>vote=deepcopy(self.ensemble[i]._get_votes_for_instance(X))<if_stmt>vote<ne>{}<and>sum(vote.values())<g>0<block_start>vote=normalize_values_in_dict(vote inplace=<true>)<if_stmt><not>self.disable_weighted_vote<block_start>performance=self.ensemble[i].evaluator.accuracy_score()<if>self.performance_metric<eq>'acc'<else>self.ensemble[i].evaluator.kappa_score()<if_stmt>performance<ne>0.0# CHECK How to handle negative (kappa) values? 
<block_start><for_stmt>k vote<block_start>vote[k]=vote[k]<times>performance<block_end><block_end><block_end># Add values <for_stmt>k vote<block_start><try_stmt><block_start>combined_votes[k]<augadd>vote[k]<block_end><except_stmt>KeyError<block_start>combined_votes[k]=vote[k]<block_end><block_end><block_end><block_end><return>combined_votes<block_end><def_stmt>_init_ensemble self X<block_start>self._set_max_features(get_dimensions(X)[1])<line_sep>self.ensemble=[ARFBaseLearner(index_original=i classifier=ARFHoeffdingTreeClassifier(max_byte_size=self.max_byte_size memory_estimate_period=self.memory_estimate_period grace_period=self.grace_period split_criterion=self.split_criterion split_confidence=self.split_confidence tie_threshold=self.tie_threshold binary_split=self.binary_split stop_mem_management=self.stop_mem_management remove_poor_atts=self.remove_poor_atts no_preprune=self.no_preprune leaf_prediction=self.leaf_prediction nb_threshold=self.nb_threshold nominal_attributes=self.nominal_attributes max_features=self.max_features random_state=self.random_state) instances_seen=self.instances_seen drift_detection_method=self.drift_detection_method warning_detection_method=self.warning_detection_method is_background_learner=<false>)<for>i range(self.n_estimators)]<block_end><def_stmt>_set_max_features self n<block_start><if_stmt>self.max_features<eq>'auto'<or>self.max_features<eq>'sqrt'<block_start>self.max_features=round(math.sqrt(n))<block_end><elif_stmt>self.max_features<eq>'log2'<block_start>self.max_features=round(math.log2(n))<block_end><elif_stmt>isinstance(self.max_features int)# Consider 'max_features' features at each split. <block_start><pass><block_end><elif_stmt>isinstance(self.max_features float)# Consider 'max_features' as a percentage <block_start>self.max_features=int(self.max_features<times>n)<block_end><elif_stmt>self.max_features<is><none><block_start>self.max_features=n<block_end><else_stmt># Default to "auto" <block_start>self.max_features=round(math.sqrt(n))<block_end># Sanity checks # max_features is negative, use max_features + n <if_stmt>self.max_features<l>0<block_start>self.max_features<augadd>n<block_end># max_features <= 0 # (m can be negative if max_features is negative and abs(max_features) > n), # use max_features = 1 <if_stmt>self.max_features<le>0<block_start>self.max_features=1<block_end># max_features > n, then use n <if_stmt>self.max_features<g>n<block_start>self.max_features=n<block_end><block_end><block_end><class_stmt>ARFBaseLearner(BaseSKMObject)<block_start>"""ARF Base Learner class. Parameters ---------- index_original: int Tree index within the ensemble. classifier: ARFHoeffdingTreeClassifier Tree classifier. instances_seen: int Number of instances seen by the tree. drift_detection_method: BaseDriftDetector Drift Detection method. warning_detection_method: BaseDriftDetector Warning Detection method. is_background_learner: bool True if the tree is a background learner. Notes ----- Inner class that represents a single tree member of the forest. Contains analysis information, such as the numberOfDriftsDetected. 
"""<def_stmt>__init__ self index_original classifier:ARFHoeffdingTreeClassifier instances_seen drift_detection_method:BaseDriftDetector warning_detection_method:BaseDriftDetector is_background_learner<block_start>self.index_original=index_original<line_sep>self.classifier=classifier<line_sep>self.created_on=instances_seen<line_sep>self.is_background_learner=is_background_learner<line_sep>self.evaluator_method=ClassificationPerformanceEvaluator<line_sep># Drift and warning self.drift_detection_method=drift_detection_method<line_sep>self.warning_detection_method=warning_detection_method<line_sep>self.last_drift_on=0<line_sep>self.last_warning_on=0<line_sep>self.nb_drifts_detected=0<line_sep>self.nb_warnings_detected=0<line_sep>self.drift_detection=<none><line_sep>self.warning_detection=<none><line_sep>self.background_learner=<none><line_sep>self._use_drift_detector=<false><line_sep>self._use_background_learner=<false><line_sep>self.evaluator=self.evaluator_method()<line_sep># Initialize drift and warning detectors <if_stmt>drift_detection_method<is><not><none><block_start>self._use_drift_detector=<true><line_sep>self.drift_detection=deepcopy(drift_detection_method)<block_end><if_stmt>warning_detection_method<is><not><none><block_start>self._use_background_learner=<true><line_sep>self.warning_detection=deepcopy(warning_detection_method)<block_end><block_end><def_stmt>reset self instances_seen<block_start><if_stmt>self._use_background_learner<and>self.background_learner<is><not><none><block_start>self.classifier=self.background_learner.classifier<line_sep>self.warning_detection=self.background_learner.warning_detection<line_sep>self.drift_detection=self.background_learner.drift_detection<line_sep>self.evaluator_method=self.background_learner.evaluator_method<line_sep>self.created_on=self.background_learner.created_on<line_sep>self.background_learner=<none><block_end><else_stmt><block_start>self.classifier.reset()<line_sep>self.created_on=instances_seen<line_sep>self.drift_detection.reset()<block_end>self.evaluator=self.evaluator_method()<block_end><def_stmt>partial_fit self X y classes sample_weight instances_seen<block_start>self.classifier.partial_fit(X y classes=classes sample_weight=sample_weight)<if_stmt>self.background_learner<block_start>self.background_learner.classifier.partial_fit(X y classes=classes sample_weight=sample_weight)<block_end><if_stmt>self._use_drift_detector<and><not>self.is_background_learner<block_start>correctly_classifies=self.classifier.predict(X)<eq>y<line_sep># Check for warning only if use_background_learner is active <if_stmt>self._use_background_learner<block_start>self.warning_detection.add_element(int(<not>correctly_classifies))<line_sep># Check if there was a change <if_stmt>self.warning_detection.detected_change()<block_start>self.last_warning_on=instances_seen<line_sep>self.nb_warnings_detected<augadd>1<line_sep># Create a new background tree classifier background_learner=self.classifier.new_instance()<line_sep># Create a new background learner object self.background_learner=ARFBaseLearner(self.index_original background_learner instances_seen self.drift_detection_method self.warning_detection_method <true>)<line_sep># Update the warning detection object for the current object # (this effectively resets changes made to the object while it # was still a background learner). 
self.warning_detection.reset()<block_end><block_end># Update the drift detection self.drift_detection.add_element(int(<not>correctly_classifies))<line_sep># Check if there was a change <if_stmt>self.drift_detection.detected_change()<block_start>self.last_drift_on=instances_seen<line_sep>self.nb_drifts_detected<augadd>1<line_sep>self.reset(instances_seen)<block_end><block_end><block_end><def_stmt>predict self X<block_start><return>self.classifier.predict(X)<block_end><def_stmt>_get_votes_for_instance self X<block_start><return>self.classifier._get_votes_for_instance(X)<block_end><block_end>
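# --- Added illustrative sketch (not part of the original file) ---
# How the ensemble resolves max_features at init time; this mirrors
# _set_max_features() above for a hypothetical 20-feature stream.
import math

n = 20
print(round(math.sqrt(n)))  # 'auto' / 'sqrt' -> 4
print(round(math.log2(n)))  # 'log2'          -> 4
print(int(0.5 * n))         # float 0.5       -> 10 (fraction of features)
print(n)                    # None            -> 20 (all features)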
<import_from_future_stmt> print_function absolute_import division# makes KratosMultiphysics backward compatible with Python 2.6 and 2.7 <import_stmt>sys<line_sep>kratos_benchmarking_path='../../../../benchmarking'<line_sep>sys.path.append(kratos_benchmarking_path)<import_stmt>benchmarking<line_sep>print("Building reference data for edgebased_PureConvection.py...")<line_sep>benchmarking.BuildReferenceData("edgebased_PureConvection.py" "test_pureconvectionsolver_benchmarking_ref.txt")<line_sep>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("SKIM")<line_sep>process.source=cms.Source("PoolSource" fileNames=cms.untracked.vstring('/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/D266D139-D871-DE11-A709-001D09F28F0C.root' '/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/CA27788D-E871-DE11-8B46-001D09F276CF.root' '/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/AC5633B2-D471-DE11-9B3A-001D09F252F3.root' '/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/9CD957E7-D071-DE11-B6AE-001D09F252F3.root' '/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/94BF68F7-D171-DE11-902B-000423D986A8.root' '/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/7838FE1E-C771-DE11-9FD5-000423D98950.root' '/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/56632803-DD71-DE11-BAF5-000423D9870C.root' '/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/42A67CB9-E971-DE11-AA86-001D09F252F3.root' '/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/407225D3-D071-DE11-809B-001D09F297EF.root' '/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/3E5E1CF0-D271-DE11-AC2B-000423D94700.root' '/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/2C57E916-D071-DE11-AF0E-001D09F24E39.root' '/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/228896A5-E571-DE11-A60B-001D09F2AF96.root'))<line_sep>process.configurationMetadata=cms.untracked.PSet(version=cms.untracked.string('$Revision: 1.6 $') name=cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/DPGAnalysis/Skims/python/MultiMuon_cfg.py,v $') annotation=cms.untracked.string('CRAFT MultiMuon skim'))<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1000))<line_sep>process.options=cms.untracked.PSet(wantSummary=cms.untracked.bool(<true>))<line_sep>process.load('Configuration/StandardSequences/MagneticField_AutoFromDBCurrent_cff')<line_sep>process.load('Configuration/StandardSequences/GeometryIdeal_cff')<line_sep>process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")<line_sep>process.GlobalTag.globaltag='GR09_31X_V3P::All'<line_sep>process.load("Configuration.StandardSequences.ReconstructionCosmics_cff")<line_sep>process.multiCosmicMuonFilter=cms.EDFilter("TrackCountFilter" src=cms.InputTag('cosmicMuonsBarrelOnly') minNumber=cms.uint32(5))<line_sep>process.multiLHCMuonFilter=cms.EDFilter("TrackCountFilter" src=cms.InputTag('lhcStandAloneMuonsBarrelOnly') minNumber=cms.uint32(5))<line_sep>process.out=cms.OutputModule("PoolOutputModule" outputCommands=cms.untracked.vstring('keep *' 'drop *_MEtoEDMConverter_*_*') SelectEvents=cms.untracked.PSet(SelectEvents=cms.vstring('multiCosmicMuonPath' 'multiLHCMuonPath')) dataset=cms.untracked.PSet(dataTier=cms.untracked.string('RECO') filterName=cms.untracked.string('multiCosmicMuon')) fileName=cms.untracked.string('/tmp/malgeri/multiMuon.root'))<line_sep>process.multiCosmicMuonPath=cms.Path(process.multiCosmicMuonFilter)<line_sep>process.multiLHCMuonPath=cms.Path(process.multiLHCMuonFilter)<line_sep>process.this_is_the_end=cms.EndPath(process.out)<line_sep>
"""migrate provenance to not null Revision ID: 469ece903d76 Revises: <PASSWORD> Create Date: 2021-05-02 09:48:57.061825 """<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<import_from_stmt>sqlalchemy.orm.session Session<line_sep># revision identifiers, used by Alembic. revision="469ece903d76"<line_sep>down_revision="<PASSWORD>"<line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade <block_start>conn=op.get_bind()<line_sep>conn.execute(sa.text("UPDATE base_result SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"))<line_sep>conn.execute(sa.text("UPDATE base_result SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"))<line_sep>conn.execute(sa.text("UPDATE base_result SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"))<line_sep>conn.execute(sa.text("UPDATE molecule SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"))<line_sep>conn.execute(sa.text("UPDATE molecule SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"))<line_sep>conn.execute(sa.text("UPDATE molecule SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"))<line_sep>conn.execute(sa.text("UPDATE molecule SET connectivity = null where connectivity::text = '[]'"))<line_sep>conn.execute(sa.text("UPDATE result SET properties = properties::jsonb - 'mp2_total_correlation_energy' || jsonb_build_object('mp2_correlation_energy', properties->'mp2_total_correlation_energy') WHERE properties::jsonb ? 'mp2_total_correlation_energy'"))<block_end><def_stmt>downgrade <block_start><pass><block_end>
<import_stmt>unittest<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>os<import_stmt>shutil<import_from_stmt>pyhealth.models.sequence.dipole Dipole<import_from_stmt>pyhealth.models.sequence.lstm LSTM<import_from_stmt>pyhealth.models.sequence.gru GRU<import_from_stmt>pyhealth.models.sequence.embedgru EmbedGRU<import_from_stmt>pyhealth.models.sequence.retain Retain<import_from_stmt>pyhealth.models.sequence.raim RAIM<import_from_stmt>pyhealth.models.sequence.tlstm tLSTM<import_from_stmt>pyhealth.models.sequence.stagenet StageNet<import_from_stmt>pyhealth.models.sequence.xgboost_seq XGBoostSequence<import_from_stmt>pyhealth.models.sequence.rf RandomForest<import_from_stmt>pyhealth.data.expdata_generator sequencedata<as>expdata_generator<import_from_stmt>pyhealth.evaluation.evaluator func<import_stmt>sys<if_stmt>sys.version_info<ge>(3 6)<block_start><import_stmt>zipfile<block_end><else_stmt><block_start><import_stmt>zipfile36<as>zipfile<block_end><class_stmt>TestSequentialModel(unittest.TestCase)<block_start>expdata_id='test.sequence.model'<def_stmt>test_01 self<block_start><if_stmt>os.path.exists('./experiments_data')<is><false><block_start>os.mkdir('./experiments_data')<block_end><if_stmt>os.path.exists('./datasets/mimic')<is><false><block_start>z=zipfile.ZipFile("./datasets/mimic.zip" "r")<line_sep>seq_x=[]<line_sep>label_y=[]<for_stmt>filename z.namelist()<block_start>z.extract(filename './datasets')<block_end><block_end>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.get_exp_data(sel_task='mortality' data_root='./datasets/mimic')<block_end><def_stmt>test_02_lstm_cpu self<block_start>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.load_exp_data()<line_sep>expmodel_id='test.lstm.cpu'<line_sep>clf=LSTM(expmodel_id=expmodel_id n_batchsize=20 use_gpu=<false> n_epoch=10)<line_sep>clf.fit(cur_dataset.train cur_dataset.valid)<line_sep>clf.load_model()<line_sep>clf.inference(cur_dataset.test)<line_sep>pred_results=clf.get_results()<assert_stmt>np.shape(pred_results['hat_y'])<eq>np.shape(pred_results['y'])<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']).tolist()<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']<times>0).tolist()<block_end><def_stmt>test_02_lstm_gpu self<block_start>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.load_exp_data()<line_sep>expmodel_id='test.lstm.gpu'<line_sep>clf=LSTM(expmodel_id=expmodel_id n_batchsize=20 use_gpu=<true> n_epoch=10)<line_sep>clf.fit(cur_dataset.train cur_dataset.valid)<line_sep>clf.load_model()<line_sep>clf.inference(cur_dataset.test)<line_sep>pred_results=clf.get_results()<assert_stmt>np.shape(pred_results['hat_y'])<eq>np.shape(pred_results['y'])<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']).tolist()<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']<times>0).tolist()<block_end><def_stmt>test_02_gru self<block_start>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.load_exp_data()<line_sep>expmodel_id='test.gru'<line_sep>clf=GRU(expmodel_id=expmodel_id n_batchsize=20 use_gpu=<true> n_epoch=10)<line_sep>clf.fit(cur_dataset.train cur_dataset.valid)<line_sep>clf.load_model()<line_sep>clf.inference(cur_dataset.test)<line_sep>pred_results=clf.get_results()<assert_stmt>np.shape(pred_results['hat_y'])<eq>np.shape(pred_results['y'])<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']).tolist()<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']<times>0).tolist()<block_end><def_stmt>test_02_embedgru 
self<block_start>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.load_exp_data()<line_sep>expmodel_id='test.embedgru'<line_sep>clf=EmbedGRU(expmodel_id=expmodel_id n_batchsize=20 use_gpu=<true> n_epoch=10)<line_sep>clf.fit(cur_dataset.train cur_dataset.valid)<line_sep>clf.load_model()<line_sep>clf.inference(cur_dataset.test)<line_sep>pred_results=clf.get_results()<assert_stmt>np.shape(pred_results['hat_y'])<eq>np.shape(pred_results['y'])<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']).tolist()<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']<times>0).tolist()<block_end><def_stmt>test_02_dipole self<block_start>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.load_exp_data()<line_sep>expmodel_id='test.dipole'<line_sep>clf=Dipole(expmodel_id=expmodel_id n_batchsize=20 use_gpu=<true> n_epoch=10)<line_sep>clf.fit(cur_dataset.train cur_dataset.valid)<line_sep>clf.load_model()<line_sep>clf.inference(cur_dataset.test)<line_sep>pred_results=clf.get_results()<assert_stmt>np.shape(pred_results['hat_y'])<eq>np.shape(pred_results['y'])<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']).tolist()<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']<times>0).tolist()<block_end><def_stmt>test_02_retain self<block_start>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.load_exp_data()<line_sep>expmodel_id='test.retain'<line_sep>clf=Retain(expmodel_id=expmodel_id n_batchsize=20 use_gpu=<true> n_epoch=10)<line_sep>clf.fit(cur_dataset.train cur_dataset.valid)<line_sep>clf.load_model()<line_sep>clf.inference(cur_dataset.test)<line_sep>pred_results=clf.get_results()<assert_stmt>np.shape(pred_results['hat_y'])<eq>np.shape(pred_results['y'])<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']).tolist()<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']<times>0).tolist()<block_end><def_stmt>test_02_raim self<block_start>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.load_exp_data()<line_sep>expmodel_id='test.raim'<line_sep>clf=RAIM(expmodel_id=expmodel_id n_batchsize=20 use_gpu=<true> n_epoch=10)<line_sep>clf.fit(cur_dataset.train cur_dataset.valid)<line_sep>clf.load_model()<line_sep>clf.inference(cur_dataset.test)<line_sep>pred_results=clf.get_results()<assert_stmt>np.shape(pred_results['hat_y'])<eq>np.shape(pred_results['y'])<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']).tolist()<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']<times>0).tolist()<block_end><def_stmt>test_02_tlstm self<block_start>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.load_exp_data()<line_sep>expmodel_id='test.tlstm'<line_sep>clf=tLSTM(expmodel_id=expmodel_id n_batchsize=20 use_gpu=<true> n_epoch=10)<line_sep>clf.fit(cur_dataset.train cur_dataset.valid)<line_sep>clf.load_model()<line_sep>clf.inference(cur_dataset.test)<line_sep>pred_results=clf.get_results()<assert_stmt>np.shape(pred_results['hat_y'])<eq>np.shape(pred_results['y'])<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']).tolist()<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']<times>0).tolist()<block_end><def_stmt>test_02_stagenet self<block_start>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.load_exp_data()<line_sep>expmodel_id='test.stagenet'<line_sep>clf=StageNet(expmodel_id=expmodel_id n_batchsize=20 use_gpu=<true> n_epoch=10)<line_sep>clf.fit(cur_dataset.train 
cur_dataset.valid)<line_sep>clf.load_model()<line_sep>clf.inference(cur_dataset.test)<line_sep>pred_results=clf.get_results()<assert_stmt>np.shape(pred_results['hat_y'])<eq>np.shape(pred_results['y'])<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']).tolist()<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']<times>0).tolist()<block_end><def_stmt>test_02_xgboost self<block_start>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.load_exp_data()<line_sep>expmodel_id='test.xgboost'<line_sep>clf=XGBoostSequence(expmodel_id=expmodel_id)<line_sep>clf.fit(cur_dataset.train cur_dataset.valid)<line_sep>clf.load_model()<line_sep>clf.inference(cur_dataset.test)<line_sep>pred_results=clf.get_results()<assert_stmt>np.shape(pred_results['hat_y'])<eq>np.shape(pred_results['y'])<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']).tolist()<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']<times>0).tolist()<block_end><def_stmt>test_02_rm self<block_start>cur_dataset=expdata_generator(self.expdata_id)<line_sep>cur_dataset.load_exp_data()<line_sep>expmodel_id='test.randomforest'<line_sep>clf=RandomForest(expmodel_id=expmodel_id)<line_sep>clf.fit(cur_dataset.train cur_dataset.valid)<line_sep>clf.load_model()<line_sep>clf.inference(cur_dataset.test)<line_sep>pred_results=clf.get_results()<assert_stmt>np.shape(pred_results['hat_y'])<eq>np.shape(pred_results['y'])<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']).tolist()<assert_stmt><true><not><in>np.isnan(pred_results['hat_y']<times>0).tolist()<block_end><def_stmt>test_03_delete self<block_start>shutil.rmtree(os.path.join('./experiments_data' self.expdata_id))<line_sep>shutil.rmtree(os.path.join('./experiments_records' 'test.lstm.cpu'))<line_sep>shutil.rmtree(os.path.join('./experiments_records' 'test.lstm.gpu'))<line_sep>shutil.rmtree(os.path.join('./experiments_records' 'test.gru'))<line_sep>shutil.rmtree(os.path.join('./experiments_records' 'test.embedgru'))<line_sep>shutil.rmtree(os.path.join('./experiments_records' 'test.dipole'))<line_sep>shutil.rmtree(os.path.join('./experiments_records' 'test.retain'))<line_sep>shutil.rmtree(os.path.join('./experiments_records' 'test.raim'))<line_sep>shutil.rmtree(os.path.join('./experiments_records' 'test.tlstm'))<line_sep>shutil.rmtree(os.path.join('./experiments_records' 'test.stagenet'))<line_sep>shutil.rmtree(os.path.join('./experiments_records' 'test.xgboost'))<line_sep>shutil.rmtree(os.path.join('./experiments_records' 'test.randomforest'))<block_end><block_end>
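# --- Added illustrative sketch (not part of the original file) ---
# Condensed version of the fit/load/inference cycle the tests above
# exercise, outside unittest. It assumes the same package layout and the
# extracted mimic demo data under ./datasets; the ids are illustrative.
from pyhealth.data.expdata_generator import sequencedata as expdata_generator
from pyhealth.models.sequence.lstm import LSTM

cur_dataset = expdata_generator('demo.sequence')
cur_dataset.get_exp_data(sel_task='mortality', data_root='./datasets/mimic')
cur_dataset.load_exp_data()

clf = LSTM(expmodel_id='demo.lstm', n_batchsize=20, use_gpu=False, n_epoch=2)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
print(clf.get_results()['hat_y'])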
<import_from_stmt>qtpy QtWidgets QtCore<import_from_stmt>qtpy.QtCore Qt<class_stmt>AnnotationScene(QtWidgets.QGraphicsScene)<block_start>clickRequest=QtCore.Signal(int int bool)<def_stmt>__init__ self parent=<none><block_start>super(AnnotationScene self).__init__(parent)<line_sep>self.creating=<false><line_sep>self.polygon_items=[]<block_end><def_stmt>updatePolygonSize self<block_start><for_stmt>poly self.polygon_items<block_start><for_stmt>grip poly.m_items<block_start>grip.updateSize()<block_end><for_stmt>line poly.m_lines<block_start>line.updateWidth()<block_end><block_end><block_end><def_stmt>setCreating self creating=<true><block_start>self.creating=creating<block_end><def_stmt>mousePressEvent self ev<block_start>pos=ev.scenePos()<if_stmt><not>self.creating<and><not>self.hovering<block_start><if_stmt>ev.buttons()<in>[Qt.LeftButton Qt.RightButton]<block_start>self.clickRequest.emit(int(pos.x()) int(pos.y()) ev.buttons()<eq>Qt.LeftButton)<block_end><block_end><elif_stmt>self.creating<block_start>self.polygon_item.removeLastPoint()<line_sep>self.polygon_item.addPointLast(ev.scenePos())<line_sep># movable element self.polygon_item.addPointLast(ev.scenePos())<block_end>super(AnnotationScene self).mousePressEvent(ev)<block_end><def_stmt>mouseMoveEvent self ev<block_start><if_stmt>self.creating<block_start>self.polygon_item.movePoint(# self.polygon_item.number_of_points() - 1, ev.scenePos() len(self.polygon_item)-1 ev.scenePos() )<block_end>super(AnnotationScene self).mouseMoveEvent(ev)<block_end>@property<def_stmt>item_hovering self<block_start><for_stmt>poly self.polygon_items<block_start><if_stmt>poly.item_hovering<block_start><return><true><block_end><block_end><return><false><block_end>@property<def_stmt>polygon_hovering self<block_start><for_stmt>poly self.polygon_items<block_start><if_stmt>poly.polygon_hovering<block_start><return><true><block_end><block_end><return><false><block_end>@property<def_stmt>line_hovering self<block_start><for_stmt>poly self.polygon_items<block_start><if_stmt>poly.line_hovering<block_start><return><true><block_end><block_end><return><false><block_end>@property<def_stmt>hovering self<block_start><return>self.item_hovering<or>self.polygon_hovering<or>self.line_hovering<block_end><block_end>
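# --- Added illustrative sketch (not part of the original file) ---
# Minimal embedding of the scene above in a QGraphicsView. It assumes a Qt
# binding backing qtpy is installed; the click handler is illustrative.
import sys
from qtpy import QtWidgets

app = QtWidgets.QApplication(sys.argv)
scene = AnnotationScene()
scene.clickRequest.connect(
    lambda x, y, left: print('click at', x, y, 'left button:', left))
view = QtWidgets.QGraphicsView(scene)
view.show()
sys.exit(app.exec_())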
<import_from_stmt>..adapters.couchdb CouchDB<import_from_stmt>.base NoSQLDialect<import_from_stmt>. dialects<line_sep>@dialects.register_for(CouchDB)<class_stmt>CouchDBDialect(NoSQLDialect)<block_start><def_stmt>_and self first second query_env={}<block_start><return>"(%s && %s)"%(self.expand(first query_env=query_env) self.expand(second query_env=query_env) )<block_end><def_stmt>_or self first second query_env={}<block_start><return>"(%s || %s)"%(self.expand(first query_env=query_env) self.expand(second query_env=query_env) )<block_end><def_stmt>eq self first second=<none> query_env={}<block_start><if_stmt>second<is><none><block_start><return>"(%s == null)"%self.expand(first query_env=query_env)<block_end><return>"(%s == %s)"%(self.expand(first query_env=query_env) self.expand(second first.type query_env=query_env) )<block_end><def_stmt>ne self first second=<none> query_env={}<block_start><if_stmt>second<is><none><block_start><return>"(%s != null)"%self.expand(first query_env=query_env)<block_end><return>"(%s != %s)"%(self.expand(first query_env=query_env) self.expand(second first.type query_env=query_env) )<block_end><def_stmt>comma self first second query_env={}<block_start><return>"%s + %s"%(self.expand(first query_env=query_env) self.expand(second query_env=query_env) )<block_end><block_end>
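# --- Added illustrative note (not part of the original file) ---
# Shapes of the strings the methods above emit, with A and B standing in
# for whatever self.expand() returns for the operands:
#   eq(A, None)  -> "(A == null)"
#   ne(A, B)     -> "(A != B)"
#   _and(Q1, Q2) -> "(Q1 && Q2)"
#   _or(Q1, Q2)  -> "(Q1 || Q2)"
# Nested queries therefore compose into parenthesized JavaScript boolean
# expressions that CouchDB can evaluate.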
# -*- coding: utf-8 -*- <import_stmt>dash_core_components<as>dcc<import_stmt>dash_html_components<as>html<import_from_stmt>dash_docs styles<import_from_stmt>dash_docs tools<import_from_stmt>dash_docs reusable_components<as>rc<line_sep>examples=tools.load_examples(__file__)<line_sep>layout=html.Div(children=[html.H1("Markdown Examples and Reference") html.H2("Headers") rc.ComponentBlock("""import dash_core_components as dcc dcc.Markdown(''' # This is an <h1> tag ## This is an <h2> tag ###### This is an <h6> tag ''')""") html.H2("Emphasis") rc.ComponentBlock("""import dash_core_components as dcc dcc.Markdown(''' *This text will be italic* _This will also be italic_ **This text will be bold** __This will also be bold__ _You **can** combine them_ ''')""") html.Hr() html.H2("Lists") rc.ComponentBlock("""import dash_core_components as dcc dcc.Markdown(''' * Item 1 * Item 2 * Item 2a * Item 2b ''')""") html.Hr() html.H2("Block Quotes") rc.ComponentBlock("""import dash_core_components as dcc dcc.Markdown(''' > > Block quotes are used to highlight text. > ''')""") html.Hr() html.H2("Links") rc.ComponentBlock("""import dash_core_components as dcc dcc.Markdown(''' [Dash User Guide](/) ''')""") html.Hr() html.H2("Inline Code") html.P("Any block of text surrounded by ` ` will be rendered as inline-code.") # Don't use ComponentBlock for markdown block quotes... too complicated to # get all the nested quotes right! rc.Markdown(""" ````py import dash_core_components as dcc dcc.Markdown(''' Inline code snippet: `True` Block code snippet: ```py import dash external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) ``` ''') ```` """) html.Div(rc.Markdown(''' Inline code snippet: `True` Block code snippet: ```py import dash external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) ``` ''') className='example-container') rc.Markdown(''' Only certain languages are supported by default in `dcc.Markdown`. For more details about how to customize the languages and colour schemes, please see ["Syntax Highlighting With Markdown"](https://dash.plot.ly/external-resources#md-syntax-highlight). ''') html.H2('dcc.Markdown Properties') rc.ComponentReference('Markdown')])<line_sep>
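# Hedged usage sketch: serving this layout from a minimal standalone Dash app; the
# wiring below is illustrative and not part of the dash-docs page itself.
<import_stmt>dash<line_sep>app=dash.Dash(__name__)<line_sep>app.layout=layout<if_stmt>__name__<eq>'__main__'<block_start>app.run_server(debug=<true>)<block_end>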
# Import configurations <import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("test")<line_sep>process.load("CalibTracker.SiStripDCS.MessLogger_cfi")<line_sep>process.SiStripConfigDb=cms.Service("SiStripConfigDb" ConfDb=cms.untracked.string('username/password@cms_omds_nolb') TNS_ADMIN=cms.untracked.string('.') UsingDb=cms.untracked.bool(<true>) Partitions=cms.untracked.PSet(TPDD=cms.untracked.PSet(PartitionName=cms.untracked.string('TP_08-AUG-2008_1') ForceVersions=cms.untracked.bool(<true>) FecVersion=cms.untracked.vuint32(430 2) DcuDetIdsVersion=cms.untracked.vuint32(9 0)) TMDD=cms.untracked.PSet(PartitionName=cms.untracked.string('TM_08-AUG-2008_1') ForceVersions=cms.untracked.bool(<true>) FecVersion=cms.untracked.vuint32(428 1) DcuDetIdsVersion=cms.untracked.vuint32(9 0)) TIDD=cms.untracked.PSet(PartitionName=cms.untracked.string('TI_08-AUG-2008_1') ForceVersions=cms.untracked.bool(<true>) FecVersion=cms.untracked.vuint32(427 1) DcuDetIdsVersion=cms.untracked.vuint32(9 0)) TODD=cms.untracked.PSet(PartitionName=cms.untracked.string('TO_08-AUG-2008_1') ForceVersions=cms.untracked.bool(<true>) FecVersion=cms.untracked.vuint32(415 3) DcuDetIdsVersion=cms.untracked.vuint32(9 0)) TEPD2=cms.untracked.PSet(PartitionName=cms.untracked.string('TE_27-JUN-2008_2') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(211 2)) TMPD=cms.untracked.PSet(PartitionName=cms.untracked.string('TE_17-JUN-2008_12') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(163 1)) TEPD1=cms.untracked.PSet(PartitionName=cms.untracked.string('TE_24-JUN-2008_2') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(204 1)) TEPD4=cms.untracked.PSet(PartitionName=cms.untracked.string('TE_30-JUN-2008_1') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(229 1)) TEPD3=cms.untracked.PSet(PartitionName=cms.untracked.string('TE_27-JUN-2008_4') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(214 1)) TPPD=cms.untracked.PSet(PartitionName=cms.untracked.string('TE_17-JUN-2008_11') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(162 1)) TIPD=cms.untracked.PSet(PartitionName=cms.untracked.string('TI_17-JUN-2008_2') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(157 1)) TIPD2=cms.untracked.PSet(PartitionName=cms.untracked.string('TI_18-JUN-2008_1') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(165 1)) TIPD3=cms.untracked.PSet(PartitionName=cms.untracked.string('TI_18-JUN-2008_10') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(179 1)) TIPD4=cms.untracked.PSet(PartitionName=cms.untracked.string('TI_20-JUN-2008_1') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(192 1)) TIPD5=cms.untracked.PSet(PartitionName=cms.untracked.string('TI_27-JUN-2008_1') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(212 1)) TIPD6=cms.untracked.PSet(PartitionName=cms.untracked.string('TI_27-JUN-2008_3') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(218 1)) TOPD=cms.untracked.PSet(PartitionName=cms.untracked.string('TO_18-JUN-2008_1_TEST_1') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(177 1)) TOPD2=cms.untracked.PSet(PartitionName=cms.untracked.string('TO_18-JUN-2008_2') ForceVersions=cms.untracked.bool(<true>) 
DcuPsuMapVersion=cms.untracked.vuint32(178 1)) TOPD3=cms.untracked.PSet(PartitionName=cms.untracked.string('TO_30-JUN-2008_1') ForceVersions=cms.untracked.bool(<true>) DcuPsuMapVersion=cms.untracked.vuint32(228 1))))<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1))<line_sep>process.source=cms.Source("EmptySource" numberEventsInRun=cms.untracked.uint32(1) firstRun=cms.untracked.uint32(1))<line_sep>process.load("CondCore.DBCommon.CondDBCommon_cfi")<line_sep>process.CondDBCommon.connect=cms.string('oracle://cms_omds_nolb/username')<line_sep>process.SiStripModuleHVBuilder=cms.Service("SiStripModuleHVBuilder" onlineDB=cms.untracked.string('oracle://cms_omds_nolb/username') authPath=cms.untracked.string('.') # Format for date/time vector: year, month, day, hour, minute, second, nanosecond Tmin=cms.untracked.vint32(2008 10 13 1 0 0 0) Tmax=cms.untracked.vint32(2008 10 13 12 0 0 0) # Do NOT change this unless you know what you are doing! TSetMin=cms.untracked.vint32(2007 11 26 0 0 0 0) # queryType can be either STATUSCHANGE or LASTVALUE queryType=cms.untracked.string('LASTVALUE') # if reading lastValue from file put insert file name here lastValueFile=cms.untracked.string('') # flag to show if you are reading from file for lastValue or not lastValueFromFile=cms.untracked.bool(<false>) # debugModeOn=cms.untracked.bool(<false>))<line_sep>process.PoolDBOutputService=cms.Service("PoolDBOutputService" BlobStreamerName=cms.untracked.string('TBufferBlobStreamingService') DBParameters=cms.PSet(messageLevel=cms.untracked.int32(2) authenticationPath=cms.untracked.string('/afs/cern.ch/cms/DB/conddb')) timetype=cms.untracked.string('timestamp') connect=cms.string('sqlite_file:dbfile.db') toPut=cms.VPSet(cms.PSet(record=cms.string('SiStripDetVOffRcd') tag=cms.string('SiStripDetVOff_Fake_31X'))) logconnect=cms.untracked.string('sqlite_file:logfile.db'))<line_sep>process.siStripPopConModuleHV=cms.EDAnalyzer("SiStripPopConModuleHV" record=cms.string('SiStripDetVOffRcd') loggingOn=cms.untracked.bool(<true>) SinceAppendMode=cms.bool(<true>) Source=cms.PSet(name=cms.untracked.string('default')))<line_sep>process.p=cms.Path(process.siStripPopConModuleHV)<line_sep>
""" Tests for offsets.BDay """<import_from_stmt>datetime date datetime timedelta <import_stmt>pytest<import_from_stmt>pandas._libs.tslibs.offsets ApplyTypeError BDay BMonthEnd <import_from_stmt>pandas DatetimeIndex _testing<as>tm <import_from_stmt>pandas.tests.tseries.offsets.common Base assert_is_on_offset assert_offset_equal <import_from_stmt>pandas.tests.tseries.offsets.test_offsets _ApplyCases<import_from_stmt>pandas.tseries offsets<as>offsets<class_stmt>TestBusinessDay(Base)<block_start>_offset=BDay<def_stmt>setup_method self method<block_start>self.d=datetime(2008 1 1)<line_sep>self.offset=BDay()<line_sep>self.offset1=self.offset<line_sep>self.offset2=BDay(2)<block_end><def_stmt>test_different_normalize_equals self# GH#21404 changed __eq__ to return False when `normalize` does not match <block_start>offset=self._offset()<line_sep>offset2=self._offset(normalize=<true>)<assert_stmt>offset<ne>offset2<block_end><def_stmt>test_repr self<block_start><assert_stmt>repr(self.offset)<eq>"<BusinessDay>"<assert_stmt>repr(self.offset2)<eq>"<2 * BusinessDays>"<line_sep>expected="<BusinessDay: offset=datetime.timedelta(days=1)>"<assert_stmt>repr(self.offset+timedelta(1))<eq>expected<block_end><def_stmt>test_with_offset self<block_start>offset=self.offset+timedelta(hours=2)<assert_stmt>(self.d+offset)<eq>datetime(2008 1 2 2)<block_end><def_stmt>test_with_offset_index self<block_start>dti=DatetimeIndex([self.d])<line_sep>result=dti+(self.offset+timedelta(hours=2))<line_sep>expected=DatetimeIndex([datetime(2008 1 2 2)])<line_sep>tm.assert_index_equal(result expected)<block_end><def_stmt>test_eq self<block_start><assert_stmt>self.offset2<eq>self.offset2<block_end><def_stmt>test_mul self<block_start><pass><block_end><def_stmt>test_hash self<block_start><assert_stmt>hash(self.offset2)<eq>hash(self.offset2)<block_end><def_stmt>test_call self<block_start><with_stmt>tm.assert_produces_warning(FutureWarning)# GH#34171 DateOffset.__call__ is deprecated <block_start><assert_stmt>self.offset2(self.d)<eq>datetime(2008 1 3)<block_end><block_end><def_stmt>testRollback1 self<block_start><assert_stmt>BDay(10).rollback(self.d)<eq>self.d<block_end><def_stmt>testRollback2 self<block_start><assert_stmt>BDay(10).rollback(datetime(2008 1 5))<eq>datetime(2008 1 4)<block_end><def_stmt>testRollforward1 self<block_start><assert_stmt>BDay(10).rollforward(self.d)<eq>self.d<block_end><def_stmt>testRollforward2 self<block_start><assert_stmt>BDay(10).rollforward(datetime(2008 1 5))<eq>datetime(2008 1 7)<block_end><def_stmt>test_roll_date_object self<block_start>offset=BDay()<line_sep>dt=date(2012 9 15)<line_sep>result=offset.rollback(dt)<assert_stmt>result<eq>datetime(2012 9 14)<line_sep>result=offset.rollforward(dt)<assert_stmt>result<eq>datetime(2012 9 17)<line_sep>offset=offsets.Day()<line_sep>result=offset.rollback(dt)<assert_stmt>result<eq>datetime(2012 9 15)<line_sep>result=offset.rollforward(dt)<assert_stmt>result<eq>datetime(2012 9 15)<block_end><def_stmt>test_is_on_offset self<block_start>tests=[(BDay() datetime(2008 1 1) <true>) (BDay() datetime(2008 1 5) <false>) ]<for_stmt>offset,d,expected tests<block_start>assert_is_on_offset(offset d expected)<block_end><block_end>apply_cases:_ApplyCases=[(BDay() {datetime(2008 1 1):datetime(2008 1 2) datetime(2008 1 4):datetime(2008 1 7) datetime(2008 1 5):datetime(2008 1 7) datetime(2008 1 6):datetime(2008 1 7) datetime(2008 1 7):datetime(2008 1 8) } ) (2<times>BDay() {datetime(2008 1 1):datetime(2008 1 3) datetime(2008 1 4):datetime(2008 1 8) datetime(2008 1 5):datetime(2008 1 8) 
datetime(2008 1 6):datetime(2008 1 8) datetime(2008 1 7):datetime(2008 1 9) } ) (-BDay() {datetime(2008 1 1):datetime(2007 12 31) datetime(2008 1 4):datetime(2008 1 3) datetime(2008 1 5):datetime(2008 1 4) datetime(2008 1 6):datetime(2008 1 4) datetime(2008 1 7):datetime(2008 1 4) datetime(2008 1 8):datetime(2008 1 7) } ) (-2<times>BDay() {datetime(2008 1 1):datetime(2007 12 28) datetime(2008 1 4):datetime(2008 1 2) datetime(2008 1 5):datetime(2008 1 3) datetime(2008 1 6):datetime(2008 1 3) datetime(2008 1 7):datetime(2008 1 3) datetime(2008 1 8):datetime(2008 1 4) datetime(2008 1 9):datetime(2008 1 7) } ) (BDay(0) {datetime(2008 1 1):datetime(2008 1 1) datetime(2008 1 4):datetime(2008 1 4) datetime(2008 1 5):datetime(2008 1 7) datetime(2008 1 6):datetime(2008 1 7) datetime(2008 1 7):datetime(2008 1 7) } ) ]<line_sep>@pytest.mark.parametrize("case" apply_cases)<def_stmt>test_apply self case<block_start>offset,cases=case<for_stmt>base,expected cases.items()<block_start>assert_offset_equal(offset base expected)<block_end><block_end><def_stmt>test_apply_large_n self<block_start>dt=datetime(2012 10 23)<line_sep>result=dt+BDay(10)<assert_stmt>result<eq>datetime(2012 11 6)<line_sep>result=dt+BDay(100)-BDay(100)<assert_stmt>result<eq>dt<line_sep>off=BDay()<times>6<line_sep>rs=datetime(2012 1 1)-off<line_sep>xp=datetime(2011 12 23)<assert_stmt>rs<eq>xp<line_sep>st=datetime(2011 12 18)<line_sep>rs=st+off<line_sep>xp=datetime(2011 12 26)<assert_stmt>rs<eq>xp<line_sep>off=BDay()<times>10<line_sep>rs=datetime(2014 1 5)+off# see #5890 xp=datetime(2014 1 17)<assert_stmt>rs<eq>xp<block_end><def_stmt>test_apply_corner self<block_start>msg="Only know how to combine business day with datetime or timedelta"<with_stmt>pytest.raises(ApplyTypeError match=msg)<block_start>BDay().apply(BMonthEnd())<block_end><block_end><block_end>
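# Hedged standalone illustration of the offset arithmetic exercised by the tests above;
# the dates mirror the apply_cases fixtures (2008-01-04 was a Friday).
<import_from_stmt>datetime datetime<import_from_stmt>pandas.tseries.offsets BDay<assert_stmt>datetime(2008 1 4)+2<times>BDay()<eq>datetime(2008 1 8)# Friday + 2 business days -> Tuesday
<assert_stmt>BDay(10).rollforward(datetime(2008 1 5))<eq>datetime(2008 1 7)# Saturday rolls forward to Monday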
<import_stmt>sys os<import_stmt>re subprocess<line_sep>dataset=0<if_stmt>__name__<eq>'__main__'<block_start><if_stmt>len(sys.argv)<ne>3<block_start>print('Usage: python %s <input> <output>'%sys.argv[0])<line_sep>sys.exit(1)<block_end><block_end><with_stmt>open(sys.argv[1])<as>fin<block_start><with_stmt>open(sys.argv[2] 'w')<as>fout<block_start><for_stmt>line fin<block_start>items=line.strip().split('&')<line_sep>name=items[0]<line_sep>m=re.match(r'.*?K\s*=\s*(\d+).*?' name)<assert_stmt>m name<line_sep>K=int(m.group(1))<if_stmt>K<g>128<block_start><continue><block_end>#print (K) m=re.match(r'.*?rounds\s*=\s*(\d+).*?' name)<assert_stmt>m name<line_sep>rounds=int(m.group(1))<line_sep>#print (rounds) m=re.match(r'.*?D\s*=\s*(\d+).*?' name)<assert_stmt>m name<line_sep>D=int(m.group(1))<line_sep>idx=2+2<times>dataset<line_sep>item1=items[idx]<line_sep>m=re.match(r'\s*([\d\.]+).*?' item1)<if_stmt><not>m<block_start><continue><block_end>bleu=float(m.group(1))<line_sep>item2=items[idx+1]<line_sep>m=re.match(r'\s*([\d\.]+).*?' item2)<assert_stmt>m item2<line_sep>latency=m.group(1)<line_sep>latency=float(latency)<line_sep>fout.write(f'{K}\t{rounds}\t{D}\t{bleu}\t{latency}\n')<line_sep>#print (D) #output_items = [] #output_items.append(name) #output_items.append(items[1]) #output_items[-1] = output_items[-1] + '\\\\' #fout.write('&'.join(output_items) + '\n') <block_end><block_end><block_end>
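# Hedged example (made-up values) of a row the regexes above accept; with dataset = 0
# the BLEU and latency cells are items[2] and items[3], so a row such as
# 'K = 64, rounds = 2, D = 6 & setting & 27.5 & 103.2'
# would be written out as '64\t2\t6\t27.5\t103.2'.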
# -*- coding: utf-8 -*- ########################################################################### # Copyright (c), The AiiDA team. All rights reserved. # # This file is part of the AiiDA code. # # # # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### # pylint: disable=import-error,no-name-in-module """Test object relationships in the database."""<import_stmt>warnings<import_from_stmt>sqlalchemy exc<as>sa_exc<import_stmt>aiida<import_from_stmt>aiida.backends.sqlalchemy.models.node DbNode<import_from_stmt>aiida.backends.sqlalchemy.models.user DbUser<import_from_stmt>aiida.backends.testbase AiidaTestCase<import_from_stmt>aiida.common.links LinkType<import_from_stmt>aiida.common.utils get_new_uuid<import_from_stmt>aiida.orm CalculationNode Data<class_stmt>TestRelationshipsSQLA(AiidaTestCase)<block_start>"""Class of tests concerning the schema and the correct implementation of relationships within the AiiDA ORM The general naming convention is the following: 1)tests on one-to-many relationships: test_<Parent>_<child> (Parent class is capitalized). 2)tests on many-to-many relationships: test_<peer>_<peer> (none is capitalized)."""<def_stmt>test_outputs_children_relationship self<block_start>"""This test checks that the outputs_q, children_q relationship and the corresponding properties work as expected."""<line_sep>n_1=Data().store()<line_sep>n_2=CalculationNode()<line_sep>n_3=Data().store()<line_sep># Create a link between these 2 nodes n_2.add_incoming(n_1 link_type=LinkType.INPUT_CALC link_label='N1')<line_sep>n_2.store()<line_sep>n_3.add_incoming(n_2 link_type=LinkType.CREATE link_label='N2')<line_sep># Check that the result of outputs is a list self.assertIsInstance(n_1.backend_entity.dbmodel.outputs list 'This is expected to be a list')<line_sep># Check that the result of outputs_q is a query <import_from_stmt>sqlalchemy.orm.dynamic AppenderQuery<line_sep>self.assertIsInstance(n_1.backend_entity.dbmodel.outputs_q AppenderQuery 'This is expected to be an AppenderQuery')<line_sep># Check that the result of outputs is correct out={_.pk<for>_ n_1.backend_entity.dbmodel.outputs}<line_sep>self.assertEqual(out set([n_2.pk]))<block_end><def_stmt>test_inputs_parents_relationship self<block_start>"""This test checks that the inputs_q, parents_q relationship and the corresponding properties work as expected."""<line_sep>n_1=Data().store()<line_sep>n_2=CalculationNode()<line_sep>n_3=Data().store()<line_sep># Create a link between these 2 nodes n_2.add_incoming(n_1 link_type=LinkType.INPUT_CALC link_label='N1')<line_sep>n_2.store()<line_sep>n_3.add_incoming(n_2 link_type=LinkType.CREATE link_label='N2')<line_sep># Check that the result of inputs is a list self.assertIsInstance(n_1.backend_entity.dbmodel.inputs list 'This is expected to be a list')<line_sep># Check that the result of inputs_q is a query <import_from_stmt>sqlalchemy.orm.dynamic AppenderQuery<line_sep>self.assertIsInstance(n_1.backend_entity.dbmodel.inputs_q AppenderQuery 'This is expected to be an AppenderQuery')<line_sep># Check that the result of inputs is correct out={_.pk<for>_ n_3.backend_entity.dbmodel.inputs}<line_sep>self.assertEqual(out set([n_2.pk]))<block_end><def_stmt>test_user_node_1 self<block_start>"""Test that when a user and a node having that user are created, storing NODE induces storage
of the USER Assert the correct storage of user and node."""<line_sep># Create user dbu1=DbUser('<EMAIL>' 'spam' 'eggs' 'monty')<line_sep># Create node node_dict=dict(user=dbu1)<line_sep>dbn_1=DbNode(**node_dict)<line_sep># Check that the two are neither flushed nor committed self.assertIsNone(dbu1.id)<line_sep>self.assertIsNone(dbn_1.id)<line_sep>session=aiida.backends.sqlalchemy.get_scoped_session()<line_sep># Add only the node and commit session.add(dbn_1)<line_sep>session.commit()<line_sep># Check that a pk has been assigned, which means that things have # been flushed into the database self.assertIsNotNone(dbn_1.id)<line_sep>self.assertIsNotNone(dbu1.id)<block_end><def_stmt>test_user_node_2 self<block_start>"""Test that when a user and a node having that user are created, storing USER does NOT induce storage of the NODE Assert the correct storage of user and node."""<line_sep># Create user dbu1=DbUser('tests2<EMAIL>' 'spam' 'eggs' 'monty')<line_sep># Create node node_dict=dict(user=dbu1)<line_sep>dbn_1=DbNode(**node_dict)<line_sep># Check that the two are neither flushed nor committed self.assertIsNone(dbu1.id)<line_sep>self.assertIsNone(dbn_1.id)<line_sep>session=aiida.backends.sqlalchemy.get_scoped_session()<line_sep># Catch all the SQLAlchemy warnings generated by the following code <with_stmt>warnings.catch_warnings()# pylint: disable=no-member <block_start>warnings.simplefilter('ignore' category=sa_exc.SAWarning)# pylint: disable=no-member # Add only the user and commit session.add(dbu1)<line_sep>session.commit()<block_end># Check that a pk has been assigned (or not), which means that things # have been flushed into the database self.assertIsNotNone(dbu1.id)<line_sep>self.assertIsNone(dbn_1.id)<block_end><def_stmt>test_user_node_3 self<block_start>"""Test that when a user and two nodes having that user are created, storing only ONE NODE induces storage of that node, of the user but not of the other node Assert the correct storage of the user and node. Assert the non-storage of the other node."""<line_sep># Create user dbu1=DbUser('tests3@schema' 'spam' 'eggs' 'monty')<line_sep># Create node node_dict=dict(user=dbu1)<line_sep>dbn_1=DbNode(**node_dict)<line_sep>dbn_2=DbNode(**node_dict)<line_sep># Check that the two are neither flushed nor committed self.assertIsNone(dbu1.id)<line_sep>self.assertIsNone(dbn_1.id)<line_sep>self.assertIsNone(dbn_2.id)<line_sep>session=aiida.backends.sqlalchemy.get_scoped_session()<line_sep># Add only first node and commit session.add(dbn_1)<with_stmt>warnings.catch_warnings()# suppress known SAWarning that we have not added dbn_2 <block_start>warnings.simplefilter('ignore' category=sa_exc.SAWarning)<line_sep>session.commit()<block_end># Check for which object a pk has been assigned, which means that # things have been at least flushed into the database self.assertIsNotNone(dbu1.id)<line_sep>self.assertIsNotNone(dbn_1.id)<line_sep>self.assertIsNone(dbn_2.id)<block_end><def_stmt>test_user_node_4 self<block_start>"""Test that when several nodes are created with the same user and each of them is assigned to the same name, storage of last node object associated to that node does not trigger storage of all objects. Assert the correct storage of the user and node. Assert the non-storage of the other nodes."""<line_sep># Create user dbu1=DbUser('tests4@schema' 'spam' 'eggs' 'monty')<line_sep># Create node objects assigning them to the same name # Check https://docs.python.org/2/tutorial/classes.html subsec.
9.1 <for_stmt>_ range(5)# It is important to change the uuid each time (or any other # variable) so that a different object (with a different pointer) # is actually created in this scope. <block_start>dbn_1=DbNode(user=dbu1 uuid=get_new_uuid())<block_end># Check that the two are neither flushed nor committed self.assertIsNone(dbu1.id)<line_sep>self.assertIsNone(dbn_1.id)<line_sep>session=aiida.backends.sqlalchemy.get_scoped_session()<line_sep># Add only first node and commit session.add(dbn_1)<with_stmt>warnings.catch_warnings()# suppress known SAWarning that we have not added the other nodes <block_start>warnings.simplefilter('ignore' category=sa_exc.SAWarning)<line_sep>session.commit()<block_end># Check for which object a pk has been assigned, which means that # things have been at least flushed into the database self.assertIsNotNone(dbu1.id)<line_sep>self.assertIsNotNone(dbn_1.id)<block_end><block_end>
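# Hedged standalone illustration (not AiiDA code; assumes SQLAlchemy 1.4+) of the
# save-update cascade the tests above rely on: adding only the node also flushes its user.
<import_from_stmt>sqlalchemy Column ForeignKey Integer create_engine<import_from_stmt>sqlalchemy.orm Session declarative_base relationship<line_sep>Base=declarative_base()<class_stmt>DemoUser(Base)<block_start>__tablename__='demo_user'<line_sep>id=Column(Integer primary_key=<true>)<block_end><class_stmt>DemoNode(Base)<block_start>__tablename__='demo_node'<line_sep>id=Column(Integer primary_key=<true>)<line_sep>user_id=Column(Integer ForeignKey('demo_user.id'))<line_sep>user=relationship(DemoUser)<block_end>engine=create_engine('sqlite://')<line_sep>Base.metadata.create_all(engine)<line_sep>session=Session(engine)<line_sep>node=DemoNode(user=DemoUser())<line_sep># adding only the node cascades the related user into the session as well
session.add(node)<line_sep>session.commit()<line_sep><assert_stmt>node.id<is><not><none><and>node.user.id<is><not><none><line_sep>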
# -*- coding: utf-8 -*- <import_stmt>re<import_stmt>json<import_stmt>scrapy<import_from_stmt>locations.items GeojsonPointItem<class_stmt>BunningsSpider(scrapy.Spider)<block_start>name="bunnings"<line_sep>allowed_domains=["bunnings.com.au"]<line_sep>start_urls=('https://www.bunnings.com.au/stores/' )<def_stmt>parse self response<block_start>raw_data=re.search("com_bunnings_locations_mapLocations = (.+);" response.text ).group(1)<line_sep>stores=json.loads(raw_data)<for_stmt>idx,store enumerate(stores)<block_start>store=store['Store']<line_sep>properties={"lat":store["Location"]["Latitude"] "lon":store["Location"]["Longitude"] "name":store["StoreName"] "addr_full":f'{store["Address"]["Address"]} {store["Address"]["AddressLineTwo"]}'.strip() "city":store["Address"]["Suburb"] "state":store["Address"]["State"] "postcode":store["Address"]["Postcode"] "country":"AU" "phone":store["Phone"] "website":response.urljoin(store["StoreUrl"]) "ref":idx}<line_sep><yield>GeojsonPointItem(**properties)<block_end><block_end><block_end>
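# Hedged usage sketch: running the spider programmatically; equivalent to invoking
# scrapy runspider on this module from the command line.
<import_from_stmt>scrapy.crawler CrawlerProcess<line_sep>process=CrawlerProcess()<line_sep>process.crawl(BunningsSpider)<line_sep>process.start()<line_sep>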
""" Monthly Class Meteorological data provided by Meteostat (https://dev.meteostat.net) under the terms of the Creative Commons Attribution-NonCommercial 4.0 International Public License. The code is licensed under the MIT license. """<import_from_stmt>datetime datetime<import_from_stmt>typing Union<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>meteostat.core.cache get_file_path file_in_cache<import_from_stmt>meteostat.core.loader processing_handler load_handler<import_from_stmt>meteostat.utilities.validations validate_series<import_from_stmt>meteostat.utilities.aggregations degree_mean weighted_average<import_from_stmt>meteostat.interface.timeseries Timeseries<import_from_stmt>meteostat.interface.point Point<class_stmt>Monthly(Timeseries)<block_start>""" Retrieve monthly weather data for one or multiple weather stations or a single geographical point """<line_sep># The cache subdirectory cache_subdir:str='monthly'<line_sep># Default frequency _freq:str='1MS'<line_sep># Columns _columns:list=['year' 'month' 'tavg' 'tmin' 'tmax' 'prcp' 'snow' 'wdir' 'wspd' 'wpgt' 'pres' 'tsun']<line_sep># Index of first meteorological column _first_met_col=2<line_sep># Data types _types:dict={'tavg':'float64' 'tmin':'float64' 'tmax':'float64' 'prcp':'float64' 'snow':'float64' 'wdir':'float64' 'wspd':'float64' 'wpgt':'float64' 'pres':'float64' 'tsun':'float64'}<line_sep># Columns for date parsing _parse_dates:dict={'time':[0 1]}<line_sep># Default aggregation functions aggregations:dict={'tavg':'mean' 'tmin':'mean' 'tmax':'mean' 'prcp':'sum' 'snow':'max' 'wdir':degree_mean 'wspd':'mean' 'wpgt':'max' 'pres':'mean' 'tsun':'sum'}<def_stmt>_load self station:str<arrow><none><block_start>""" Load file from Meteostat """<line_sep># File name file='monthly/'+('full'<if>self._model<else>'obs')+'/'+station+'.csv.gz'<line_sep># Get local file path path=get_file_path(self.cache_dir self.cache_subdir file)<line_sep># Check if file in cache <if_stmt>self.max_age<g>0<and>file_in_cache(path self.max_age)# Read cached data <block_start>df=pd.read_pickle(path)<block_end><else_stmt># Get data from Meteostat <block_start>df=load_handler(self.endpoint file self._columns self._types self._parse_dates)<line_sep># Validate Series df=validate_series(df station)<line_sep># Save as Pickle <if_stmt>self.max_age<g>0<block_start>df.to_pickle(path)<block_end><block_end># Filter time period and append to DataFrame <if_stmt>self._start<and>self._end# Get time index <block_start>time=df.index.get_level_values('time')<line_sep># Filter & return <return>df.loc[(time<ge>self._start)&(time<le>self._end)]<block_end># Return <return>df<block_end><def_stmt>_get_data self<arrow><none><block_start>""" Get all required data """<if_stmt>len(self._stations)<g>0# List of datasets <block_start>datasets=[]<for_stmt>station self._stations<block_start>datasets.append((str(station) ))<block_end># Data Processing <return>processing_handler(datasets self._load self.processes self.threads)<block_end># Empty DataFrame <return>pd.DataFrame(columns=[*self._types])<block_end><def_stmt>_resolve_point self method:str stations:pd.DataFrame alt:int adapt_temp:bool<arrow><none><block_start>""" Project weather station data onto a single point """<if_stmt>self._stations.size<eq>0<or>self._data.size<eq>0<block_start><return><none><block_end><def_stmt>adjust_temp data:pd.DataFrame<block_start>""" Adjust temperature-like data based on altitude """<line_sep>data.loc[data['tavg']<ne>np.NaN 
'tavg']=data['tavg']+((2/3)<times>((data['elevation']-alt)/100))<line_sep>data.loc[data['tmin']<ne>np.NaN 'tmin']=data['tmin']+((2/3)<times>((data['elevation']-alt)/100))<line_sep>data.loc[data['tmax']<ne>np.NaN 'tmax']=data['tmax']+((2/3)<times>((data['elevation']-alt)/100))<line_sep><return>data<block_end><if_stmt>method<eq>'nearest'<block_start><if_stmt>adapt_temp# Join elevation of involved weather stations <block_start>data=self._data.join(stations['elevation'] on='station')<line_sep># Adapt temperature-like data based on altitude data=adjust_temp(data)<line_sep># Drop elevation & round data=data.drop('elevation' axis=1).round(1)<block_end><else_stmt><block_start>data=self._data<block_end>self._data=self._data.groupby(pd.Grouper(level='time' freq=self._freq)).agg('first')<block_end><else_stmt># Join score and elevation of involved weather stations <block_start>data=self._data.join(stations[['score' 'elevation']] on='station')<line_sep># Adapt temperature-like data based on altitude <if_stmt>adapt_temp<block_start>data=adjust_temp(data)<block_end># Exclude non-mean data & perform aggregation excluded=data['wdir']<line_sep>excluded=excluded.groupby(pd.Grouper(level='time' freq=self._freq)).agg('first')<line_sep># Aggregate mean data data=data.groupby(pd.Grouper(level='time' freq=self._freq)).apply(weighted_average)<line_sep># Drop RangeIndex data.index=data.index.droplevel(1)<line_sep># Merge excluded fields data['wdir']=excluded<line_sep># Drop score and elevation self._data=data.drop(['score' 'elevation'] axis=1).round(1)<block_end># Set placeholder station ID self._data['station']='XXXXX'<line_sep>self._data=self._data.set_index(['station' self._data.index.get_level_values('time')])<line_sep>self._stations=pd.Index(['XXXXX'])<block_end><def_stmt>__init__ self loc:Union[pd.DataFrame Point list str] start:datetime=<none> end:datetime=<none> model:bool=<true><arrow><none># Set list of weather stations <block_start><if_stmt>isinstance(loc pd.DataFrame)<block_start>self._stations=loc.index<block_end><elif_stmt>isinstance(loc Point)<block_start>stations=loc.get_stations('monthly' start end model)<line_sep>self._stations=stations.index<block_end><else_stmt><block_start><if_stmt><not>isinstance(loc list)<block_start>loc=[loc]<block_end>self._stations=pd.Index(loc)<block_end># Set start date <if_stmt>start<is><not><none><block_start>self._start=start.replace(day=1)<block_end># Set end date self._end=end<line_sep># Set model self._model=model<line_sep># Get data for all weather stations self._data=self._get_data()<line_sep># Interpolate data <if_stmt>isinstance(loc Point)<block_start>self._resolve_point(loc.method stations loc.alt loc.adapt_temp)<block_end># Clear cache <if_stmt>self.max_age<g>0<and>self.autoclean<block_start>self.clear_cache()<block_end><block_end><def_stmt>expected_rows self<arrow>int<block_start>""" Return the number of rows expected for the defined date range """<line_sep><return>((self._end.year-self._start.year)<times>12+self._end.month-self._start.month)+1<block_end><block_end>
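# Hedged usage sketch: one year of monthly data for a geographical point; assumes the
# Timeseries base class exposes fetch(), as in the released meteostat package.
<import_from_stmt>datetime datetime<import_from_stmt>meteostat Monthly Point<line_sep>vancouver=Point(49.2497 -123.1193 70)<line_sep>data=Monthly(vancouver datetime(2018 1 1) datetime(2018 12 31))<line_sep>df=data.fetch()# one row per month with the columns declared in _columns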
<import_from_stmt>setuptools setup find_packages<line_sep>setup(name="elegantrl" version="0.3.3" author="<NAME>, <NAME>, <NAME>, <NAME>" author_email="<EMAIL>" url="https://github.com/AI4Finance-LLC/ElegantRL" license="Apache 2.0" packages=find_packages() install_requires=["gym" "matplotlib" "numpy" "pybullet" "torch" "opencv-python" "box2d-py" ] description="Lightweight, Efficient and Stable DRL Implementation Using PyTorch" classifiers=[# Trove classifiers # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers "License :: OSI Approved :: Apache Software License" "Programming Language :: Python" "Programming Language :: Python :: 3" "Programming Language :: Python :: 3.6" "Programming Language :: Python :: 3.7" "Programming Language :: Python :: 3.8" "Programming Language :: Python :: Implementation :: CPython" "Programming Language :: Python :: Implementation :: PyPy" ] keywords="Deep Reinforcement Learning" python_requires=">=3.6" )<line_sep>
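# Hedged usage note: from the repository root, 'pip install .' installs the package with
# the dependency list above; 'pip install -e .' gives an editable development install.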
<import_from_future_stmt> print_function<import_stmt>argparse<import_stmt>os<import_stmt>h5py<import_stmt>numpy<as>np<import_stmt>sys<import_from_stmt>molecules.model MoleculeVAE<import_from_stmt>molecules.utils one_hot_array one_hot_index from_one_hot_array decode_smiles_from_indexes load_dataset<import_from_stmt>pylab figure axes scatter title show<import_from_stmt>rdkit Chem<import_from_stmt>rdkit.Chem Draw<line_sep>LATENT_DIM=292<line_sep>TARGET='autoencoder'<def_stmt>get_arguments <block_start>parser=argparse.ArgumentParser(description='Molecular autoencoder network')<line_sep>parser.add_argument('data' type=str help='File of latent representation tensors for decoding.')<line_sep>parser.add_argument('model' type=str help='Trained Keras model to use.')<line_sep>parser.add_argument('--save_h5' type=str help='Name of a file to write HDF5 output to.')<line_sep>parser.add_argument('--target' type=str default=TARGET help='What model to sample from: autoencoder, encoder, decoder.')<line_sep>parser.add_argument('--latent_dim' type=int metavar='N' default=LATENT_DIM help='Dimensionality of the latent representation.')<line_sep><return>parser.parse_args()<block_end><def_stmt>read_latent_data filename<block_start>h5f=h5py.File(filename 'r')<line_sep>data=h5f['latent_vectors'][:]<line_sep>charset=h5f['charset'][:]<line_sep>h5f.close()<line_sep><return>(data charset)<block_end><def_stmt>autoencoder args model<block_start>latent_dim=args.latent_dim<line_sep>data,charset=load_dataset(args.data split=<false>)<if_stmt>os.path.isfile(args.model)<block_start>model.load(charset args.model latent_rep_size=latent_dim)<block_end><else_stmt><block_start><raise>ValueError("Model file %s doesn't exist"%args.model)<block_end>sampled=model.autoencoder.predict(data[0].reshape(1 120 len(charset))).argmax(axis=2)[0]<line_sep>mol=decode_smiles_from_indexes(map(from_one_hot_array data[0]) charset)<line_sep>sampled=decode_smiles_from_indexes(sampled charset)<line_sep>print(mol)<line_sep>print(sampled)<block_end><def_stmt>decoder args model<block_start>latent_dim=args.latent_dim<line_sep>data,charset=read_latent_data(args.data)<if_stmt>os.path.isfile(args.model)<block_start>model.load(charset args.model latent_rep_size=latent_dim)<block_end><else_stmt><block_start><raise>ValueError("Model file %s doesn't exist"%args.model)<block_end>sampled=model.decoder.predict(data[0].reshape(1 latent_dim)).argmax(axis=2)[0]<line_sep>sampled=decode_smiles_from_indexes(sampled charset)<line_sep>print(sampled)<block_end><def_stmt>encoder args model<block_start>latent_dim=args.latent_dim<line_sep>data,charset=load_dataset(args.data split=<false>)<if_stmt>os.path.isfile(args.model)<block_start>model.load(charset args.model latent_rep_size=latent_dim)<block_end><else_stmt><block_start><raise>ValueError("Model file %s doesn't exist"%args.model)<block_end>x_latent=model.encoder.predict(data)<if_stmt>args.save_h5<block_start>h5f=h5py.File(args.save_h5 'w')<line_sep>h5f.create_dataset('charset' data=charset)<line_sep>h5f.create_dataset('latent_vectors' data=x_latent)<line_sep>h5f.close()<block_end><else_stmt><block_start>np.savetxt(sys.stdout x_latent delimiter='\t')<block_end><block_end><def_stmt>main <block_start>args=get_arguments()<line_sep>model=MoleculeVAE()<if_stmt>args.target<eq>'autoencoder'<block_start>autoencoder(args model)<block_end><elif_stmt>args.target<eq>'encoder'<block_start>encoder(args model)<block_end><elif_stmt>args.target<eq>'decoder'<block_start>decoder(args 
model)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
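# Hedged invocation examples (file names are placeholders):
# python sample.py processed.h5 model.h5 --target encoder --save_h5 latent.h5
# python sample.py latent.h5 model.h5 --target decoder --latent_dim 292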
<import_from_stmt>random randint<import_from_stmt>flask Flask request jsonify redirect make_response<line_sep>app=Flask(__name__)<line_sep>auth=randint(100 50000)<line_sep>@app.route('/get-auth' methods=['POST'])<def_stmt>get_auth_cookie <block_start>req=request.get_json()<if_stmt>req['pass']<eq>'<PASSWORD>'<block_start>res=make_response(jsonify({'auth':str(auth)}))<line_sep>res.set_cookie('auth' str(auth))<block_end><else_stmt><block_start>res=make_response(jsonify({'erro':'nao autorizado'}) 401)<line_sep>res.set_cookie('auth' '0')<block_end><return>res<block_end>@app.route('/get-complex-object' methods=['GET'])<def_stmt>get_complex_object <block_start>print(bool(request.args.get('returnObject')))<if_stmt>bool(request.args.get('returnObject'))<block_start>return_object={"complexObj":[{"id":"0001" "type":"donut" "name":"Cake" "ppu":0.55 "batters":{"batter":[{"id":"1001" "type":"Regular"} {"id":"1002" "type":"Chocolate"} {"id":"1003" "type":"Blueberry"} {"id":"1004" "type":"Devil's Food"}]} "topping":[{"id":"5001" "type":"None"} {"id":"5002" "type":"Glazed"} {"id":"5005" "type":"Sugar"} {"id":"5007" "type":"Powdered Sugar"} {"id":"5006" "type":"Chocolate with Sprinkles"} {"id":"5003" "type":"Chocolate"} {"id":"5004" "type":"Maple"}]} {"id":"0002" "type":"donut" "name":"Raised" "ppu":0.55 "batters":{"batter":[{"id":"1001" "type":"Regular"}]} "topping":[{"id":"5001" "type":"None"} {"id":"5002" "type":"Glazed"} {"id":"5005" "type":"Sugar"} {"id":"5003" "type":"Chocolate"} {"id":"5004" "type":"Maple"}]} {"id":"0003" "type":"donut" "name":"Old Fashioned" "ppu":0.55 "batters":{"batter":[{"id":"1001" "type":"Regular"} {"id":"1002" "type":"Chocolate"}]} "topping":[{"id":"5001" "type":"None"} {"id":"5002" "type":"Glazed"} {"id":"5003" "type":"Chocolate"} {"id":"5004" "type":"Maple"}]}]}<line_sep><return>jsonify(return_object)<block_end><return>jsonify({"erro":"erro"})<block_end>@app.route('/nao-autorizado-param' methods=['GET'])<def_stmt>get_redirect <block_start><if_stmt>request.args.get('auth')<and>int(request.args.get('auth'))<eq>auth<block_start><return>jsonify({'redirected':<false>})<block_end><return>redirect("http://localhost:5000/redirected" code=302)<block_end>@app.route('/nao-autorizado-cookie' methods=['GET'])<def_stmt>get_redirect_cookie <block_start><if_stmt>'auth'<in>request.cookies<and>request.cookies['auth']<eq>str(auth)<block_start><return>jsonify({'redirected':<false>})<block_end><return>redirect("http://localhost:5000/redirected" code=302)<block_end>@app.route('/redirected' methods=['GET'])<def_stmt>redirected <block_start><return>jsonify([{"redirected":<true>}])<block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(debug=<true> host="0.0.0.0")<block_end># run app in debug mode on port 5000.
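# Hedged client-side sketch (not part of the service): obtaining the auth cookie and
# reusing it; '<PASSWORD>' mirrors the redacted placeholder checked by the handler above.
<import_stmt>requests<line_sep>s=requests.Session()<line_sep>r=s.post('http://localhost:5000/get-auth' json={'pass':'<PASSWORD>'})<line_sep>print(r.json())# the auth cookie is now stored on the session
print(s.get('http://localhost:5000/nao-autorizado-cookie').json())# {'redirected': False}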
"""\ Lisp generator functions for wxToolBar objects @copyright: 2002-2004 <NAME>. aka crazyinsomniac on sourceforge @copyright: 2014-2016 <NAME> @license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY """<import_stmt>common<import_stmt>wcodegen<import_from_stmt>.tool *<line_sep>#from .codegen import ToolsHandler <class_stmt>LispCodeGenerator(wcodegen.LispWidgetCodeWriter)<block_start><def_stmt>get_properties_code self obj<block_start>prop=obj.properties<line_sep>out=[]<line_sep>append=out.append<line_sep>obj_name='(slot-%s obj)'%self.codegen._format_name(obj.name)<if_stmt>obj.properties["bitmapsize"].is_active()<block_start>w,h=obj.properties["bitmapsize"].get_tuple()<line_sep>append('(wxToolBar_SetToolBitmapSize %s %s %s)\n'%(obj_name w h))<block_end><if_stmt>obj.properties["margins"].is_active()<block_start>w,h=obj.properties["margins"].get_tuple()<line_sep>append('(wxToolBar_SetMargins %s %s %s)\n'%(obj_name w h))<block_end><if_stmt>obj.properties["packing"].is_active()<block_start>append('(wxToolBar_SetToolPacking %s %s)\n'%(obj_name obj.packing))<block_end><if_stmt>obj.properties["separation"].is_active()<block_start>append('(wxToolBar_SetToolSeparation %s %s)\n'%(obj_name obj.separation))<block_end><return>out<block_end><def_stmt>get_init_code self obj<block_start>out=[]<line_sep>append=out.append<line_sep>ids=[]<line_sep>obj_name=self.format_widget_access(obj)<for_stmt>tool obj.tools<block_start><if_stmt>tool.id<eq>'---'# item is a separator <block_start>append('(wxToolBar_AddSeparator %s)\n'%obj_name)<block_end><else_stmt><block_start>name,val=self.codegen.generate_code_id(<none> tool.id)<if_stmt><not>name<and>(<not>val<or>val<eq>'-1')<block_start>wid='Wx::NewId()'<block_end><else_stmt><block_start><if_stmt>name<block_start>ids.append(name)<block_end>wid=val<block_end>kinds=['wxITEM_NORMAL' 'wxITEM_CHECK' 'wxITEM_RADIO']<try_stmt><block_start>kind=kinds[int(tool.type)]<block_end><except_stmt>(IndexError ValueError)<block_start>kind='wxITEM_NORMAL'<block_end>bmp1=self.generate_code_bitmap(tool.bitmap1)<line_sep>bmp2=self.generate_code_bitmap(tool.bitmap2)<line_sep># append('%s->AddLabelTool(%s, %s, %s, %s, %s, %s, %s);\n' % append('(wxToolBar_AddTool %s %s %s %s %s %s %s %s)\n'%(obj_name wid self.codegen.quote_str(tool.label) bmp1 bmp2 kind self.codegen.quote_str(tool.short_help) self.codegen.quote_str(tool.long_help)))<block_end><block_end><return>ids+out<block_end><def_stmt>get_code self obj<block_start>"function that generates Lisp code for the toolbar of a wxFrame"<line_sep>style=obj.properties['style'].get_string_value()<if_stmt><not>style<block_start>style='wxTB_HORIZONTAL'<block_end><else_stmt><block_start>style<augadd>"|wxTB_HORIZONTAL"<line_sep>style=self.cn_f(style)<block_end>parent=self.format_widget_access(obj.parent_window)<line_sep>obj_name=self.codegen._format_name(obj.name)<line_sep>init=[';;; Tool Bar\n' '(setf (slot-%s obj) (wxToolBar_Create %s -1 -1 -1 -1 -1 %s))\n'%(obj_name parent style) ]+self.get_init_code(obj)+self.get_properties_code(obj)+['(wxFrame_SetToolBar (slot-top-window obj) (slot-%s obj))\n'%obj_name '(wxToolBar_Realize %s)\n'%self.format_widget_access(obj) ';;; Tool Bar end\n']<line_sep><return>init []<block_end><def_stmt>get_layout_code self obj<block_start>obj_name='(slot-%s obj)'%self.codegen._format_name(obj.name)<line_sep><return>['(wxToolBar_Realize %s)\n'%obj_name]<block_end><block_end><def_stmt>initialize <block_start>klass='wxToolBar'<line_sep>common.class_names['EditToolBar']=klass<line_sep>common.register('lisp' klass 
LispCodeGenerator(klass))<block_end>#, 'tools', ToolsHandler)
<import_stmt>os sys<line_sep>sys.path.insert(0 os.path.dirname(os.path.dirname(__file__)))<import_stmt>glob<import_stmt>fnmatch<import_stmt>traceback<import_stmt>logging<import_stmt>numpy<import_stmt>pytest<import_stmt>lasio<line_sep>test_dir=os.path.dirname(__file__)<line_sep>egfn=<lambda>fn:os.path.join(os.path.dirname(__file__) "examples" fn)<line_sep>stegfn=<lambda>vers fn:os.path.join(os.path.dirname(__file__) "examples" vers fn)<line_sep>logger=logging.getLogger(__name__)<def_stmt>read_file <block_start>las=lasio.read(stegfn("1.2" "sample_big.las"))<block_end><def_stmt>test_read_v12_sample_big benchmark<block_start>benchmark(read_file)<block_end>
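# Hedged note: with the pytest-benchmark plugin installed, a plain pytest run executes
# the benchmark fixture above; 'pytest --benchmark-autosave' additionally stores the timings.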
# Natural Language Toolkit: Shoebox Errors # # Copyright (C) 2001-2006 NLTK Project # Author: <NAME> <<EMAIL>> # URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT """ This module provides Shoebox exceptions. """<line_sep># --------------------------------------------------------------------- # CLASS: ShoeboxError # DESC: ??? # --------------------------------------------------------------------- <class_stmt>ShoeboxError(Exception)<block_start>""" This is the base class for all Shoebox errors. """<def_stmt>__init__ self<block_start>self._msg=""<block_end><block_end># --------------------------------------------- # CLASS: ValidationError # DESC: ??? # --------------------------------------------- <class_stmt>NonUniqueEntryError(ShoeboxError)<block_start>""" ??? """<def_stmt>__init__ self<block_start><pass><block_end><block_end><class_stmt>ValidationError(ShoeboxError)<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>setField self field<block_start>self._field=field<block_end><def_stmt>getField self<block_start><return>self._field<block_end><block_end># --------------------------------------------- # CLASS: NoMetadataFound # DESC: ??? # --------------------------------------------- <class_stmt>NoMetadataFound(ValidationError)<block_start><def_stmt>__init__ self field<block_start>self._field=field<block_end><block_end><class_stmt>FieldError(ShoeboxError)<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>__str__ self<block_start><return>self.get_message()<block_end><block_end><class_stmt>NonUniqueFieldError(FieldError)<block_start>""" Error raised when an attempt is made to retrieve a unique field which has more than one value """<def_stmt>__init__ self entry<block_start>self._entry=entry<block_end><def_stmt>setEntry self entry<block_start>self._entry=entry<block_end><def_stmt>getEntry self<block_start><return>self._entry<block_end><block_end># --------------------------------------------- # CLASS: BadFieldValue # DESC: ??? # --------------------------------------------- <class_stmt>BadFieldValueError(ValidationError FieldError)<block_start>FIELD_VALUE_ERROR_RANGE_SET='1'<line_sep>FIELD_VALUE_ERROR_NO_WORD_WRAP='2'<line_sep>FIELD_VALUE_ERROR_EMPTY_VALUE='3'<line_sep>FIELD_VALUE_ERROR_SINGLE_WORD='4'<line_sep>errorTypes={'1':"Range Set" '2':"No Word Wrap" '3':"Empty Value" '4':"Single Word"}<def_stmt>__init__ self errorType entry field fmMetadata<block_start>self._entry=entry<line_sep>self._errorType=errorType<line_sep>self._field=field<line_sep>self._fmMetadata=fmMetadata<block_end><def_stmt>__str__ self<block_start>e=self.getEntry()<line_sep>f=self.getField()<line_sep>typ=self.getErrorDescription()<line_sep>s="'%s' error in '\\%s' field of record %i!\nRecord:\n%s"%(typ f.getMarker() e.getNumber() e.getRawText())<line_sep><return>s<block_end><def_stmt>getFieldMarkerMetadata self<block_start><return>self._fmMetadata<block_end><def_stmt>setFieldMarkerMetadata self fmMetadata<block_start>self._fmMetadata=fmMetadata<block_end><def_stmt>getErrorDescription self<block_start><try_stmt><block_start><return>self.errorTypes[self.getErrorType()]<block_end><except_stmt><block_start><return><none><block_end><block_end><def_stmt>getErrorType self<block_start><return>self._errorType<block_end><def_stmt>setErrorType self errorType<block_start>self._errorType=errorType<block_end><def_stmt>getEntry self<block_start><return>self._entry<block_end><def_stmt>setEntry self entry<block_start>self._entry=entry<block_end><block_end>
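# Hedged demonstration (the stub classes are hypothetical) of how BadFieldValueError
# formats its message through the entry/field accessors it expects.
<class_stmt>_StubField<block_start><def_stmt>getMarker self<block_start><return>'lx'<block_end><block_end><class_stmt>_StubEntry<block_start><def_stmt>getNumber self<block_start><return>7<block_end><def_stmt>getRawText self<block_start><return>'\\lx example'<block_end><block_end>err=BadFieldValueError(BadFieldValueError.FIELD_VALUE_ERROR_EMPTY_VALUE _StubEntry() _StubField() <none>)<line_sep>print(err)# 'Empty Value' error in '\lx' field of record 7! ...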
<import_stmt>argparse<import_stmt>boto3<import_stmt>json<import_stmt>os<import_stmt>sys<import_from_stmt>six.moves urllib<import_stmt>uuid<import_stmt>traceback<import_from_stmt>botocore.exceptions ClientError<import_from_stmt>dictionary_sorter divide<import_from_stmt>dictionary_sorter merge<import_from_stmt>dictionary_sorter build<import_from_stmt>harness config<import_from_stmt>harness decider<import_from_stmt>harness worker<import_from_stmt>harness cloudwatch<if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("-d" "--domain" required=<true>)<line_sep>parser.add_argument("-t" "--task-list" required=<true>)<line_sep>parser.add_argument("--div-task" required=<true>)<line_sep>parser.add_argument("--div-task-version" default="1.0")<line_sep>parser.add_argument("--merge-task" required=<true>)<line_sep>parser.add_argument("--merge-task-version" default="1.0")<line_sep>parser.add_argument("--build-task" default=<none>)<line_sep>parser.add_argument("--build-task-version" default="1.0")<line_sep>parser.add_argument("-rd" "--run-decider" action="store_true")<line_sep>parser.add_argument("--region" default=<none>)<line_sep>parser.add_argument("--config-bucket" default=<none>)<line_sep>parser.add_argument("--log-group" default=<none>)<line_sep>parser.add_argument("--log-db" default=<none>)<line_sep>parser.add_argument("--kvs-db" default=<none>)<line_sep>parser.add_argument("--profile" default=<none>)<line_sep>parser.add_argument("--role-arn" default=<none>)<line_sep>parser.add_argument("--stdout" default=<none>)<line_sep>args=parser.parse_args()<try_stmt># Fetch instance identity: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html <block_start><with_stmt>urllib.request.urlopen('http://169.254.169.254/latest/dynamic/instance-identity/document')<as>response<block_start>info=json.load(response)<line_sep>ec2_region=info['region']<line_sep>identity=info['instanceId']<line_sep>print("Running on EC2 instance {} in region {}".format(identity ec2_region))<block_end><block_end><except_stmt><block_start>ec2_region="us-east-1"<line_sep>identity=os.environ.get("COMPUTERNAME" "<unavailable>")<line_sep>print("Couldn't load EC2 instance data from environment, using computer hostname {}".format(identity))<block_end><if_stmt><not>args.region<block_start>args.region=ec2_region<block_end># You can supply a profile to use if you are testing locally. session=boto3.Session(region_name=args.region profile_name=args.profile)<line_sep># You can supply a role arn to use if you are testing locally. 
<if_stmt>args.role_arn<block_start>sts_result=session.client('sts').assume_role(DurationSeconds=3600 RoleSessionName="Harness-"+str(uuid.uuid4()) RoleArn=args.role_arn)['Credentials']<line_sep>session=boto3.Session(region_name=args.region aws_access_key_id=sts_result['AccessKeyId'] aws_secret_access_key=sts_result['SecretAccessKey'] aws_session_token=sts_result['SessionToken'])<block_end><if_stmt>args.stdout<block_start><if_stmt>args.stdout<eq>'cloudwatch'<block_start>writeHandler=cloudwatch.OutputHandler('HARNESS-DEBUG' session args.region identity 'decider'<if>args.run_decider<else>'worker')<block_end><else_stmt><block_start>fp=open(args.stdout "w")<line_sep>sys.stdout=fp<line_sep>sys.stderr=fp<block_end><block_end>divide_task=config.TaskConfig(args.div_task args.div_task_version divide.handler)<line_sep>merge_task=config.TaskConfig(args.merge_task args.merge_task_version merge.handler)<line_sep>build_task=config.TaskConfig(args.build_task args.build_task_version build.handler)<if>args.build_task<else>merge_task<line_sep>harness_config=config.Config(session args.region args.domain args.task_list divide_task build_task merge_task args.log_group args.log_db args.kvs_db args.config_bucket identity)<try_stmt><block_start><if_stmt>args.run_decider<block_start>decider.run_decider(harness_config)<block_end><else_stmt><block_start>worker.run_worker(harness_config)<block_end><block_end><except_stmt>Exception<as>e<block_start>message="Error - "+str(e)+"\n"+traceback.format_exc()<line_sep>print(message)<block_end><block_end>
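# Hedged invocation sketch built from the argparse flags above (domain and task names
# are placeholders):
# python main.py -d MyDomain -t my-task-list --div-task Divide --merge-task Merge --run-decider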
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ P1 tests for Add Remove Network to VM Test Plan: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Add+Remove+networks+to+VM+Test+cases Issue Link: https://issues.apache.org/jira/browse/CLOUDSTACK-645 Feature Specifications: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Add+Remove+Networks+to+VMs """<import_stmt>random<import_stmt>time<import_stmt>unittest<import_from_stmt>ddt ddt data<import_from_stmt>marvin.cloudstackAPI addNicToVirtualMachine removeNicFromVirtualMachine updateDefaultNicForVirtualMachine <import_from_stmt>marvin.cloudstackTestCase cloudstackTestCase<import_from_stmt>marvin.codes PASS<import_from_stmt>marvin.lib.base Account Domain ServiceOffering VirtualMachine NetworkOffering Network VpcOffering VPC PublicIPAddress FireWallRule NATRule <import_from_stmt>marvin.lib.common get_domain get_zone get_template list_virtual_machines list_events list_zones get_free_vlan update_resource_limit list_nat_rules <import_from_stmt>marvin.lib.utils validateList random_gen get_hypervisor_type <line_sep># Import Local Modules <import_from_stmt>nose.plugins.attrib attr<class_stmt>Services<block_start>"""Test Add Remove Network Services """<def_stmt>__init__ self<block_start>self.services={"sleep":60 "ostype":"CentOS 5.3 (64-bit)" # Cent OS 5.3 (64 bit) "isolated_network_offering":{"name":'Test Isolated Network offering' "displaytext":'Test Isolated Network offering' "guestiptype":'Isolated' "supportedservices":'Dhcp,Dns,SourceNat,PortForwarding' "traffictype":'GUEST' "availability":'Optional' "serviceProviderList":{"Dhcp":'VirtualRouter' "Dns":'VirtualRouter' "SourceNat":'VirtualRouter' "PortForwarding":'VirtualRouter' } } "shared_network_offering":{"name":'Test Shared Network Offering' "displaytext":'Test Shared Network Offering' "guestiptype":'Shared' "supportedservices":'Dhcp,Dns,UserData' "specifyVlan":"True" "specifyIpRanges":"True" "traffictype":'GUEST' "serviceProviderList":{"Dhcp":'VirtualRouter' "Dns":'VirtualRouter' "UserData":'VirtualRouter'} } "shared_network":{"name":"Test Shared Network" "displaytext":"Test Shared Network" "gateway":"172.16.17.1" "netmask":"255.255.255.0" "startip":"172.16.17.2" "endip":"172.16.17.20" } "shared_network_2":{"name":"Test Shared Network" "displaytext":"Test Shared Network" "gateway":"172.16.18.1" "netmask":"255.255.255.0" "startip":"172.16.18.2" "endip":"172.16.18.20" } "isolated_network":{"name":"Test Isolated Network" "displaytext":"Test Isolated Network" } "service_offering":{"name":"Tiny Instance" "displaytext":"Tiny Instance" "cpunumber":1 "cpuspeed":100 # in MHz "memory":256 # In MBs } "account":{"email":"<EMAIL>" "firstname":"Test_add_remove_network_vm" "lastname":"User" "username":"test_add_remove_network_vm" "password":"password" } 
"domain":{"name":"Domain_add_nw_to_vm" } "virtual_machine":{"displayname":"testserver" "username":"root" # VM creds for SSH "password":"password" "ssh_port":22 "hypervisor":'XenServer' "privateport":22 "publicport":22 "protocol":'TCP' } "vpc_offering":{"name":'VPC off add remove network' "displaytext":'VPC off add remove network' "supportedservices":'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat,NetworkACL' } "vpc":{"name":"TestVPC add remove network" "displaytext":"TestVPC add remove network" "cidr":'10.0.0.1/24'} "natrule":{"privateport":22 "publicport":22 "protocol":"TCP"} }<block_end><block_end>@ddt<class_stmt>TestAddNetworkToVirtualMachine(cloudstackTestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.testClient=super(TestAddNetworkToVirtualMachine cls).getClsTestClient()<line_sep>cls.api_client=cls.testClient.getApiClient()<line_sep>cls.services=Services().services<line_sep>hypervisor=get_hypervisor_type(cls.api_client)<if_stmt>hypervisor.lower()<not><in>["xenserver" "kvm"]<block_start><raise>unittest.SkipTest("This feature is supported only on XenServer and KVM")<block_end>cls.services=Services().services<line_sep># Get Zone, Domain and templates cls.domain=get_domain(cls.api_client)<line_sep>cls.zone=get_zone(cls.api_client cls.testClient.getZoneForTests())<line_sep>template=get_template(cls.api_client cls.zone.id cls.services["ostype"])<line_sep># Set Zones and disk offerings cls.services["virtual_machine"]["zoneid"]=cls.zone.id<line_sep>cls.services["virtual_machine"]["template"]=template.id<line_sep># Create Accounts & networks cls.services["isolated_network"]["zoneid"]=cls.zone.id<line_sep>cls.services["shared_network"]["zoneid"]=cls.zone.id<line_sep>cls._cleanup=[]<line_sep>cls.account=Account.create(cls.api_client cls.services["account"] domainid=cls.domain.id)<line_sep>cls._cleanup.append(cls.account)<line_sep>cls.service_offering=ServiceOffering.create(cls.api_client cls.services["service_offering"])<line_sep>cls._cleanup.append(cls.service_offering)<line_sep>cls.virtual_machine=VirtualMachine.create(cls.api_client cls.services["virtual_machine"] accountid=cls.account.name domainid=cls.account.domainid serviceofferingid=cls.service_offering.id mode=cls.zone.networktype)<line_sep>cls._cleanup.append(cls.virtual_machine)<line_sep>cls.defaultNetworkId=cls.virtual_machine.nic[0].networkid<line_sep>cls.isolated_network_offering=NetworkOffering.create(cls.api_client cls.services["isolated_network_offering"])<line_sep>cls._cleanup.append(cls.isolated_network_offering)<line_sep>cls.isolated_network_offering.update(cls.api_client state='Enabled')<line_sep>cls.shared_network_offering=NetworkOffering.create(cls.api_client cls.services["shared_network_offering"])<line_sep>cls._cleanup.append(cls.shared_network_offering)<line_sep>cls.shared_network_offering.update(cls.api_client state='Enabled')<line_sep>cls.isolated_network=Network.create(cls.api_client cls.services["isolated_network"] cls.account.name cls.account.domainid networkofferingid=cls.isolated_network_offering.id)<line_sep>cls._cleanup.append(cls.isolated_network)<line_sep>cls.services["shared_network"]["vlan"]=get_free_vlan(cls.api_client cls.zone.id)[1]<line_sep>shared_network_subnet_number=random.randrange(1 
254)<line_sep>cls.services["shared_network"]["gateway"]="172.16."+str(shared_network_subnet_number)+".1"<line_sep>cls.services["shared_network"]["startip"]="172.16."+str(shared_network_subnet_number)+".2"<line_sep>cls.services["shared_network"]["endip"]="172.16."+str(shared_network_subnet_number)+".20"<line_sep>cls.shared_nw_endip=cls.services["shared_network"]["endip"]<line_sep>cls.shared_network=Network.create(cls.api_client cls.services["shared_network"] cls.account.name cls.account.domainid networkofferingid=cls.shared_network_offering.id)<line_sep>cls._cleanup.append(cls.shared_network)<line_sep><return><block_end><def_stmt>setUp self<block_start>self.apiclient=self.testClient.getApiClient()<line_sep>self.dbclient=self.testClient.getDbConnection()<line_sep>self.addednics=[]<line_sep>shared_network_subnet_number=random.randrange(1 254)<line_sep>self.services["shared_network"]["gateway"]="172.16."+str(shared_network_subnet_number)+".1"<line_sep>self.services["shared_network"]["startip"]="172.16."+str(shared_network_subnet_number)+".2"<line_sep>self.services["shared_network"]["endip"]="172.16."+str(shared_network_subnet_number)+".20"<line_sep>self.services["shared_network_2"]["gateway"]="172.16."+str(shared_network_subnet_number+1)+".1"<line_sep>self.services["shared_network_2"]["startip"]="172.16."+str(shared_network_subnet_number+1)+".2"<line_sep>self.services["shared_network_2"]["endip"]="172.16."+str(shared_network_subnet_number+1)+".20"<line_sep>self.cleanup=[]<block_end><def_stmt>tearDown self<block_start><try_stmt><block_start><for_stmt>nic self.addednics<block_start>self.virtual_machine.remove_nic(self.apiclient nic.id)<block_end><block_end><except_stmt>Exception<as>e<block_start>self.debug("Exception during removal of nics : %s"%e)<block_end>super(TestAddNetworkToVirtualMachine self).tearDown()<block_end>@classmethod<def_stmt>tearDownClass cls<block_start><try_stmt># Disable Network Offerings <block_start>cls.isolated_network_offering.update(cls.api_client state='Disabled')<line_sep>cls.shared_network_offering.update(cls.api_client state='Disabled')<block_end><except_stmt>Exception<as>e<block_start>cls.debug("Exception during disable of networks : %s"%e)<block_end>super(TestAddNetworkToVirtualMachine cls).tearDownClass()<block_end><def_stmt>addNetworkToVm self network vm ipaddress=<none><block_start>"""Add network to VM and check if new nic added in the VM"""<line_sep>self.debug("Adding %s Network: %s to virtual machine %s"%(network.type network.id vm.id))<line_sep>vm.add_nic(self.apiclient network.id ipaddress=ipaddress)<line_sep>vm_list=list_virtual_machines(self.apiclient id=vm.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep>self.debug("virtual machine nics: %s"%vm_list[0].nic)<line_sep>nics=[x<for>x vm_list[0].nic<if>x.networkid<eq>network.id]<line_sep>self.debug("Filtered nics list: %s:"%nics)<line_sep># Only the nics added to self.virtual_machine should be added to this list # Nics added to their list are removed before execution of next test case because we are using # same virtual machine in all test cases, so it is important that the common # virtual machine should contain only the default nic whenever new test case # execution starts <if_stmt>vm.id<eq>self.virtual_machine.id<block_start>self.addednics.append(nics[-1])<block_end>self.assertTrue(len(nics)<eq>1 "nics list should contain the nic of added isolated network,\ 
the number of nics for the network should be 1, instead they are %s"%len(nics))<if_stmt>ipaddress<is><not><none><block_start>self.assertEqual(nics[0].ipaddress ipaddress "The ip address of nic does not match with \ the ip address passed while adding network to vm. ip address of nic is %s \ while passed ip address is %s"%(nics[0].ipaddress ipaddress))<block_end><return><block_end>@attr(tags=["advanced" "dvs"])@data("isolated" "shared")<def_stmt>test_01_add_nw_running_vm self value<block_start>"""Add network to running VM"""<line_sep># 1. Deploy VM in an account # 2. Add isolated/shared network to the VM which is in running state # Validate the following: # 1. New nic is generated for the added network # 2. Event NIC.CREATE is generated network=<none># The network which we are adding to the vm <if_stmt>value<eq>"isolated"<block_start>network=self.isolated_network<block_end><elif_stmt>value<eq>"shared"<block_start>network=self.shared_network<block_end><if_stmt>network<is><none><block_start>self.skipTest("Network should not be none. Case not handled for Network of type %s"%value)<block_end>self.addNetworkToVm(network self.virtual_machine)<line_sep>self.debug("Retrieving the list of events matching 'NIC.CREATE' in account: %s"%self.account.name)<line_sep>events=list_events(self.apiclient account=self.account.name domainid=self.account.domainid type='NIC.CREATE')<line_sep>event_list_validation_result=validateList(events)<line_sep>self.assertEqual(event_list_validation_result[0] PASS "event list validation failed due to %s"%event_list_validation_result[2])<line_sep>self.debug("Events list contains event NIC.CREATE")<line_sep><return><block_end>@attr(tags=["advanced" "dvs"])@data("isolated" "shared")<def_stmt>test_02_add_nw_stopped_vm self value<block_start>"""Add network to stopped VM"""<line_sep># 1. Deploy VM in an account # 2. Stop the VM # 3. Add isolated/shared network to the stopped VM # Validate the following: # 1. New nic is generated for the added network <try_stmt><block_start>self.virtual_machine.stop(self.apiclient)<block_end><except_stmt>Exception<as>e<block_start>self.fail("Failed to stop VM: %s"%e)<block_end>network=<none># The network which we are adding to the vm <if_stmt>value<eq>"isolated"<block_start>network=self.isolated_network<block_end><elif_stmt>value<eq>"shared"<block_start>network=self.shared_network<block_end><if_stmt>network<is><none><block_start>self.skipTest("Network should not be none. Case not handled for Network of type %s"%value)<block_end>self.addNetworkToVm(network self.virtual_machine)<line_sep>self.debug("Starting Virtual Machine: %s"%self.virtual_machine.id)<line_sep>self.virtual_machine.start(self.apiclient)<line_sep><return><block_end>@attr(tags=["advanced" "dvs"])@data("isolated" "shared")<def_stmt>test_03_add_nw_multiple_times self value<block_start>"""Add same network multiple times to running VM"""<line_sep># 1. Deploy VM in an account # 2. Add isolated/shared network to the VM # 3. Try Adding same network again to the VM # Validate the following: # 1. Adding same network to vm multiple times fails network=<none># The network which we are adding to the vm <if_stmt>value<eq>"isolated"<block_start>network=self.isolated_network<block_end><elif_stmt>value<eq>"shared"<block_start>network=self.shared_network<block_end><if_stmt>network<is><none><block_start>self.skipTest("Network should not be none. 
Case not handled for Network of type %s"%value)<block_end><try_stmt><block_start>virtual_machine=VirtualMachine.create(self.api_client self.services["virtual_machine"] accountid=self.account.name domainid=self.account.domainid serviceofferingid=self.service_offering.id mode=self.zone.networktype networkids=[self.defaultNetworkId])<line_sep>self.cleanup.append(virtual_machine)<block_end><except_stmt>Exception<as>e<block_start>self.fail("Failed to deploy virtual machine: %s"%e)<block_end># Adding network to vm for the first time self.addNetworkToVm(network virtual_machine)<line_sep># Trying to add same network to vm for the second time <with_stmt>self.assertRaises(Exception)<as>e<block_start>self.addNetworkToVm(network virtual_machine)<line_sep>self.debug("Adding same network again failed with exception: %s"%e.exception)<block_end><return><block_end>@attr(tags=["advanced" "dvs"])@data("isolated")<def_stmt>test_04_vpc_nw_running_vm self value<block_start>"""Add VPC network to running VM belonging to isolated network"""<line_sep># 1. Deploy VM in an account # 2. Add isolated network to the VM # 3. Create VPC # 4. Try adding VPC to the VM # Validate the following: # 1. Adding VPC to vm should fail <try_stmt><block_start>virtual_machine=VirtualMachine.create(self.api_client self.services["virtual_machine"] accountid=self.account.name domainid=self.account.domainid serviceofferingid=self.service_offering.id mode=self.zone.networktype networkids=[self.defaultNetworkId])<line_sep>self.cleanup.append(virtual_machine)<block_end><except_stmt>Exception<as>e<block_start>self.fail("Failed to deploy virtual machine: %s"%e)<block_end>network=self.isolated_network<line_sep>self.addNetworkToVm(network virtual_machine)<line_sep>self.debug("Creating VPC offering")<line_sep>vpc_off=VpcOffering.create(self.api_client self.services["vpc_offering"])<line_sep>self.cleanup.append(vpc_off)<line_sep>self.debug("Created VPC offering: %s"%vpc_off.id)<line_sep>self.debug("Enabling the VPC offering")<line_sep>vpc_off.update(self.apiclient state='Enabled')<line_sep>self.debug("Creating VPC")<line_sep>vpc=VPC.create(self.apiclient self.services["vpc"] vpcofferingid=vpc_off.id zoneid=self.zone.id account=self.account.name domainid=self.account.domainid)<line_sep>self.cleanup.append(vpc)<line_sep>self.debug("Trying to add VPC to vm belonging to isolated network, this should fail")<with_stmt>self.assertRaises(Exception)<block_start>virtual_machine.add_nic(self.apiclient vpc.id)<block_end>self.debug("Disabling vpc offering: %s"%vpc_off.id)<line_sep>vpc_off.update(self.apiclient state='Disabled')<line_sep><return><block_end>@attr(tags=["advanced" "dvs"])@data("isolated")<def_stmt>test_05_add_vpc_nw_stopped_vm self value<block_start>"""Add VPC network to stopped VM belonging to isolated network"""<line_sep># 1. Deploy VM in an account # 2. Stop the VM # 3. Add isolated network to the VM # 4. Create VPC # 5. Try adding VPC to the stopped VM # Validate the following: # 1.
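Adding VPC to vm should fail
# NOTE: the VPC offering create/enable/VPC create sequence below repeats test_04 verbatim; a minimal sketch of a hypothetical helper that could factor it out (createEnabledVpc is not part of Marvin, it only rearranges calls already used in this suite):
<def_stmt>createEnabledVpc self<block_start># create the offering, register it for cleanup, enable it, then create the VPC from it
vpc_off=VpcOffering.create(self.api_client self.services["vpc_offering"])<line_sep>self.cleanup.append(vpc_off)<line_sep>vpc_off.update(self.apiclient state='Enabled')<line_sep>vpc=VPC.create(self.apiclient self.services["vpc"] vpcofferingid=vpc_off.id zoneid=self.zone.id account=self.account.name domainid=self.account.domainid)<line_sep>self.cleanup.append(vpc)<line_sep><return>vpc<block_end>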
<try_stmt><block_start>self.virtual_machine.stop(self.apiclient)<block_end><except_stmt>Exception<as>e<block_start>self.fail("Failed to stop virtual machine: %s"%e)<block_end>self.addNetworkToVm(self.isolated_network self.virtual_machine)<line_sep>self.debug("Creating VPC offering")<line_sep>vpc_off=VpcOffering.create(self.api_client self.services["vpc_offering"])<line_sep>self.cleanup.append(vpc_off)<line_sep>self.debug("Created VPC offering: %s"%vpc_off.id)<line_sep>self.debug("Enabling the VPC offering")<line_sep>vpc_off.update(self.apiclient state='Enabled')<line_sep>self.debug("Creating VPC")<line_sep>vpc=VPC.create(self.apiclient self.services["vpc"] vpcofferingid=vpc_off.id zoneid=self.zone.id account=self.account.name domainid=self.account.domainid)<line_sep>self.cleanup.append(vpc)<line_sep>self.debug("Trying to add VPC to vm belonging to isolated network, this should fail")<with_stmt>self.assertRaises(Exception)<block_start>self.virtual_machine.add_nic(self.apiclient vpc.id)<block_end>self.debug("Starting virtual machine")<line_sep>self.virtual_machine.start(self.apiclient)<line_sep>self.debug("Disabling vpc offering: %s"%vpc_off.id)<line_sep>vpc_off.update(self.apiclient state='Disabled')<line_sep><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_06_add_nw_ipaddress_running_vm self<block_start>"""Add network and ip address to running VM"""<line_sep># 1. Deploy VM in an account # 2. Add shared network and ip address to this VM # Validate the following: # 1. New nic gets added for the shared network # 2. The newly added nic has the ip address same as # that passed while adding the network <try_stmt><block_start>virtual_machine=VirtualMachine.create(self.api_client self.services["virtual_machine"] accountid=self.account.name domainid=self.account.domainid serviceofferingid=self.service_offering.id mode=self.zone.networktype networkids=[self.defaultNetworkId])<line_sep>self.cleanup.append(virtual_machine)<block_end><except_stmt>Exception<as>e<block_start>self.fail("Failed to deploy virtual machine: %s"%e)<block_end>ipaddress=self.shared_nw_endip<line_sep>self.debug("Adding network to vm with ip address %s: "%ipaddress)<line_sep>self.addNetworkToVm(self.shared_network virtual_machine ipaddress=ipaddress)<line_sep><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_10_add_nw_invalid_ipaddress_running_vm self<block_start>"""Add network with invalid ip address to running VM"""<line_sep># 1. Deploy VM in an account # 2. Add shared network with invalid ip address to this VM # Validate the following: # 1. Adding network to VM should fail because of invalid ip address ipaddress="257.257.257.257"# Invalid ip address self.debug("Adding network to vm with ip address %s: "%ipaddress)<with_stmt>self.assertRaises(Exception)<as>e<block_start>self.addNetworkToVm(self.shared_network self.virtual_machine ipaddress=ipaddress)<line_sep>self.debug("API failed with exception: %s"%e.exception)<block_end><return><block_end># was tags=["advanced", "dvs"], # the apiclient that is being used to test this has too many rights? @attr(tags=["TODO"])@data("isolated" "shared")<def_stmt>test_14_add_nw_different_account self value<block_start>"""Add network to running VM"""<line_sep># 1. Deploy VM in an account # 2. Create new account under same domain and create network in that account # 3. Add isolated/shared network belonging to other account to the VM in first account # Validate the following: # 1.
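Adding network should fail
# NOTE: when this test is run with a root-admin api client the cross-account ownership check is effectively bypassed, which is presumably why the tag above was moved to TODO. A restricted client could be obtained with the same getUserApiClient pattern that test_26 uses later, e.g. (sketch, assuming the target account already exists):
# api_client=self.testClient.getUserApiClient(UserName=account.name DomainName=account.domain)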
network=<none># The network which we are adding to the vm account=Account.create(self.apiclient self.services["account"] domainid=self.domain.id)<line_sep>self.cleanup.append(account)<if_stmt>value<eq>"isolated"<block_start>network=Network.create(self.api_client self.services["isolated_network"] account.name account.domainid networkofferingid=self.isolated_network_offering.id)<line_sep>self.cleanup.append(network)<block_end><elif_stmt>value<eq>"shared"<block_start>self.services["shared_network_2"]["zoneid"]=self.zone.id<line_sep>self.services["shared_network_2"]["vlan"]=get_free_vlan(self.apiclient self.zone.id)[1]<line_sep>network=Network.create(self.api_client self.services["shared_network_2"] account.name account.domainid networkofferingid=self.shared_network_offering.id)<line_sep>self.cleanup.append(network)<block_end><if_stmt>network<is><none><block_start>self.skipTest("Network should not be none. Case not handled for Network of type %s"%value)<block_end>self.debug("Trying to add %s network in account %s to a vm in account %s, this should fail"%(network.type account.name self.account.name))<try_stmt><block_start>vm_with_nic=self.virtual_machine.add_nic(self.apiclient network.id)<line_sep>nics=[x<for>x vm_with_nic.nic<if>x.networkid<eq>network.id]<line_sep>self.addednics.append(nics[-1])<block_end><except_stmt>Exception<block_start><pass><block_end><else_stmt><block_start>self.fail("User was able to add NIC, test failed! This issue has been hit: CLOUDSTACK-10071")<block_end><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_24_add_nw_different_domain self<block_start>"""Add network to running VM"""<line_sep># 1. Create two domains # 2. Create network in one domain and create virtual machine in other domain # 3. Add isolated/shared network belonging to one domain to the vm belonging to other domain # Validate the following: # 1.
Adding network should fail network=<none># The network which we are adding to the vm <try_stmt><block_start>self.child_domain_1=Domain.create(self.apiclient services=self.services["domain"] parentdomainid=self.domain.id)<line_sep>self.cleanup.append(self.child_domain_1)<line_sep>self.child_do_admin_1=Account.create(self.apiclient self.services["account"] admin=<true> domainid=self.child_domain_1.id)<line_sep>self.cleanup.append(self.child_do_admin_1)<line_sep>self.child_domain_2=Domain.create(self.apiclient services=self.services["domain"] parentdomainid=self.domain.id)<line_sep>self.cleanup.append(self.child_domain_2)<line_sep>self.child_do_admin_2=Account.create(self.apiclient self.services["account"] admin=<true> domainid=self.child_domain_2.id)<line_sep>self.cleanup.append(self.child_do_admin_2)<block_end><except_stmt>Exception<as>e<block_start>self.fail(e)<block_end>network=Network.create(self.api_client self.services["isolated_network"] self.child_do_admin_1.name self.child_do_admin_1.domainid networkofferingid=self.isolated_network_offering.id)<line_sep>self.cleanup.append(network)<line_sep>virtual_machine=VirtualMachine.create(self.apiclient self.services["virtual_machine"] accountid=self.child_do_admin_2.name domainid=self.child_do_admin_2.domainid serviceofferingid=self.service_offering.id mode=self.zone.networktype)<line_sep>self.cleanup.append(virtual_machine)<line_sep>time.sleep(self.services["sleep"])<line_sep>self.debug("Trying to add %s network in domain %s to a vm in domain %s, this should fail"%(network.type self.child_domain_1.name self.child_domain_2.name))<with_stmt>self.assertRaises(Exception)<as>e<block_start>virtual_machine.add_nic(self.apiclient network.id)<line_sep>self.debug("Operation failed with exception %s"%e.exception)<block_end><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_25_add_nw_above_account_limit self<block_start>"""Add network to VM with maximum network limit reached"""<line_sep># 1. Create an account and create maximum allowed networks in the account # 2. Deploy VM in this account # 3. Create a network in other account and add to this VM # Validate the following: # 1.
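Adding network should fail
# NOTE: update_resource_limit below identifies the limit by CloudStack's numeric resource type; 6 selects the network count limit (for reference: 0=instance, 1=public IP, 2=volume, 3=snapshot, 4=template), so max=1 pins account_1 to the single network created next.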
self.debug("Creating account 1")<line_sep>account_1=Account.create(self.apiclient self.services["account"] domainid=self.domain.id)<line_sep>self.cleanup.append(account_1)<line_sep>self.debug("setting network limit of account: %s as 1"%account_1.name)<line_sep>update_resource_limit(self.apiclient 6 # Network max=1 account=account_1.name domainid=account_1.domainid)<line_sep>self.debug("Creating isolated network in account: %s"%account_1.name)<line_sep>network_1=Network.create(self.api_client self.services["isolated_network"] account_1.name account_1.domainid networkofferingid=self.isolated_network_offering.id)<line_sep>self.cleanup.append(network_1)<line_sep>self.debug("created network %s"%network_1.name)<line_sep>self.debug("Deploying virtual machine in account: %s"%account_1.name)<line_sep>virtual_machine=VirtualMachine.create(self.apiclient self.services["virtual_machine"] accountid=account_1.name domainid=account_1.domainid serviceofferingid=self.service_offering.id mode=self.zone.networktype)<line_sep>self.cleanup.append(virtual_machine)<line_sep>self.debug("Deployed virtual machine : %s"%virtual_machine.id)<line_sep>self.debug("Creating another account")<line_sep>account_2=Account.create(self.apiclient self.services["account"] domainid=self.domain.id)<line_sep>self.cleanup.append(account_2)<line_sep>self.debug("Created account %s"%account_2.name)<line_sep>self.debug("Creating network in account %s"%account_2.name)<line_sep>network_2=Network.create(self.api_client self.services["isolated_network"] account_2.name account_2.domainid networkofferingid=self.isolated_network_offering.id)<line_sep>self.cleanup.append(network_2)<line_sep>self.debug("Created network %s"%network_2.name)<line_sep>self.debug("Trying to add network %s to VM %s, this should fail"%(network_2.name virtual_machine.id))<with_stmt>self.assertRaises(Exception)<as>e<block_start>virtual_machine.add_nic(self.apiclient network_2.id)<line_sep>self.debug("Operation failed with exception %s"%e.exception)<block_end><return><block_end><block_end><class_stmt>TestRemoveNetworkFromVirtualMachine(cloudstackTestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.testClient=super(TestRemoveNetworkFromVirtualMachine cls).getClsTestClient()<line_sep>cls.api_client=cls.testClient.getApiClient()<line_sep>cls.services=Services().services<line_sep>hypervisor=get_hypervisor_type(cls.api_client)<if_stmt>hypervisor.lower()<not><in>["xenserver" "kvm"]<block_start><raise>unittest.SkipTest("This feature is supported only on XenServer and KVM")<block_end># Get Zone, Domain and templates cls.domain=get_domain(cls.api_client)<line_sep>cls.zone=get_zone(cls.api_client cls.testClient.getZoneForTests())<line_sep>template=get_template(cls.api_client cls.zone.id cls.services["ostype"])<line_sep># Set Zones and disk offerings cls.services["virtual_machine"]["zoneid"]=cls.zone.id<line_sep>cls.services["virtual_machine"]["template"]=template.id<line_sep># Create Accounts & networks cls.services["isolated_network"]["zoneid"]=cls.zone.id<line_sep>cls.services["shared_network"]["zoneid"]=cls.zone.id<line_sep>cls._cleanup=[]<line_sep>cls.account=Account.create(cls.api_client cls.services["account"] domainid=cls.domain.id)<line_sep>cls._cleanup.append(cls.account)<line_sep>cls.service_offering=ServiceOffering.create(cls.api_client cls.services["service_offering"])<line_sep>cls._cleanup.append(cls.service_offering)<line_sep>cls.virtual_machine=VirtualMachine.create(cls.api_client cls.services["virtual_machine"]
accountid=cls.account.name domainid=cls.account.domainid serviceofferingid=cls.service_offering.id mode=cls.zone.networktype)<line_sep>cls._cleanup.append(cls.virtual_machine)<line_sep># Create Isolated Network Offering cls.isolated_network_offering=NetworkOffering.create(cls.api_client cls.services["isolated_network_offering"])<line_sep>cls._cleanup.append(cls.isolated_network_offering)<line_sep># Enable Isolated Network offering cls.isolated_network_offering.update(cls.api_client state='Enabled')<line_sep>cls.isolated_network=Network.create(cls.api_client cls.services["isolated_network"] cls.account.name cls.account.domainid networkofferingid=cls.isolated_network_offering.id)<line_sep>cls._cleanup.append(cls.isolated_network)<line_sep><return><block_end><def_stmt>setUp self<block_start>self.apiclient=self.testClient.getApiClient()<line_sep>self.dbclient=self.testClient.getDbConnection()<line_sep>self.cleanup=[]<line_sep>self.addednics=[]<block_end><def_stmt>tearDown self<block_start><try_stmt><block_start><for_stmt>nic self.addednics<block_start>self.virtual_machine.remove_nic(self.apiclient nic.id)<block_end><block_end><except_stmt>Exception<as>e<block_start>self.debug("Exception during removal of nics : %s"%e)<block_end>super(TestRemoveNetworkFromVirtualMachine self).tearDown()<block_end>@classmethod<def_stmt>tearDownClass cls<block_start><try_stmt><block_start>cls.isolated_network_offering.update(cls.api_client state='Disabled')<block_end><except_stmt>Exception<as>e<block_start>cls.debug("Exception during disabling network offering : %s"%e)<block_end>super(TestRemoveNetworkFromVirtualMachine cls).tearDownClass()<block_end><def_stmt>addNetworkToVm self network vm<block_start>"""Add network to VM and check if new nic added in the VM"""<line_sep>self.debug("Adding %s Network: %s to virtual machine %s"%(network.type network.id vm.id))<line_sep>vm.add_nic(self.apiclient network.id)<line_sep>vm_list=list_virtual_machines(self.apiclient id=vm.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep>self.debug("virtual machine nics: %s"%vm_list[0].nic)<line_sep># Add nic of network to list so that it can be deleted later accessing its id from this list self.nics=[x<for>x vm_list[0].nic<if>x.networkid<eq>network.id]<line_sep>self.debug("Filtered nics list: %s:"%self.nics)<line_sep>self.assertTrue(len(self.nics)<eq>1 "nics list should contain the nic of added isolated network,\ the number of nics for the network should be 1, instead they are %s"%len(self.nics))<line_sep><return>self.nics<block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_07_remove_nic_running_vm self<block_start>"""Remove nic from running VM"""<line_sep># 1. Deploy Vm in account # 2. Add network to VM # 3. Remove the nic added by the newly added network # Validate the following: # 1. Newly added nic is removed # 2.
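Event NIC.DELETE is generated
# NOTE: validateList (used throughout this suite) returns a 3-element list: [status, first element of the input or None, reason for failure]; that is why the assertions here compare element [0] against PASS and report element [2] on failure, e.g.:
# result=validateList(vm_list)
# self.assertEqual(result[0] PASS "validation failed due to %s"%result[2])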
self.addNetworkToVm(self.isolated_network self.virtual_machine)<line_sep># Access the nic of the added network from self.nics object which is filled # in addNetworkToVm function self.debug("Removing added nic %s from vm %s"%(self.nics[0].id self.virtual_machine.id))<line_sep>self.virtual_machine.remove_nic(self.apiclient self.nics[0].id)<line_sep>vm_list=list_virtual_machines(self.apiclient id=self.virtual_machine.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep>self.debug("virtual machine nics: %s"%vm_list[0].nic)<line_sep># Verify the nic is removed from the virtual machine self.debug("Verifying the nic is removed from the virtual machine")<line_sep>self.assertFalse(any(x.networkid<eq>self.isolated_network.id<for>x vm_list[0].nic) "nic still present in the virtual machine nic list")<line_sep>self.debug("nic removed successfully")<line_sep>self.debug("Retrieving events list matching events 'NIC.DELETE'")<line_sep>events=list_events(self.apiclient account=self.account.name domainid=self.account.domainid type='NIC.DELETE')<line_sep>event_list_validation_result=validateList(events)<line_sep>self.assertEqual(event_list_validation_result[0] PASS "event list validation failed due to %s"%event_list_validation_result[2])<line_sep>self.debug("Events list contains event NIC.DELETE")<line_sep>self.debug("events: %s"%events)<line_sep><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_08_remove_default_nic self<block_start>"""Test Remove default nic of running VM"""<line_sep># 1. Deploy Vm in account # 2. Try to remove the default nic of the VM # Validate the following: # 1. Default nic of vm is not removed vm_list=list_virtual_machines(self.apiclient id=self.virtual_machine.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep>self.debug("virtual machine nics: %s"%vm_list[0].nic)<line_sep>self.assertEqual(len(vm_list[0].nic) 1 "There should only be default nic present in the vm")<line_sep>self.debug("Trying to remove the default nic of vm : %s, this should fail"%self.virtual_machine.id)<with_stmt>self.assertRaises(Exception)<block_start>self.virtual_machine.remove_nic(self.apiclient vm_list[0].nic[0].id)<line_sep>self.debug("Removing default nic of vm failed")<block_end><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_09_remove_foreign_nic self<block_start>"""Remove nic which does not belong to VM"""<line_sep># 1. Add VM in an account # 2. Add new account and deploy vm in it # 3. Try to remove nic of the new vm from first vm # Validate the following: # 1.
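Nic remove operation should fail
# NOTE: the vm deployed below is also appended to self.cleanup; the original version relied on account deletion to cascade to the vm, but an explicit append keeps the teardown order deterministic.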
self.debug("Creating new account")<line_sep>account=Account.create(self.api_client self.services["account"] domainid=self.domain.id)<line_sep>self.cleanup.append(account)<line_sep>self.debug("created new account : %s"%account.name)<line_sep>self.debug("Deploying virtual machine in this account")<line_sep>virtual_machine=VirtualMachine.create(self.apiclient self.services["virtual_machine"] accountid=account.name domainid=account.domainid serviceofferingid=self.service_offering.id mode=self.zone.networktype)<line_sep>self.cleanup.append(virtual_machine)<line_sep>self.debug("Deployed virtual machine: %s"%virtual_machine.id)<line_sep>self.debug("Trying to remove nic of new virtual machine from existing virtual machine, This \ operation should fail")<with_stmt>self.assertRaises(Exception)<as>e<block_start>self.virtual_machine.remove_nic(self.apiclient virtual_machine.nic[0].id)<line_sep>self.debug("Operation failed with exception: %s"%e.exception)<block_end><return><block_end>@attr(tags=["advanced"] required_hardware="true")<def_stmt>test_29_remove_nic_CS22503 self<block_start>"""Test to verify remove nic from vm if the nic ip is same as another vm ip in another network"""<line_sep># 1. Deploy vm v1 with networks n1 and n2 # 2. Check the ip address of nic in n2 say ip1 # 3. Deploy vm v2 in another network say n3 with same IP address as ip1 using # 'deployVirtualMachine' api with 'ipaddress' as one of the parameters. # 4. Acquire public IP in n3 network. # 5. Configure PF on the acquired IP and assign it to vm v2 # 6. Try to remove nic n2 from v1. Should be successful # There was a bug where, because both vms had the same ip address, the nic could not be removed vm1=self.virtual_machine<line_sep>nic2=self.addNetworkToVm(self.isolated_network vm1)<line_sep>self.addednics.append(nic2[0])<line_sep># get the ip address of the nic added in 2nd network vm1_ip=nic2[0].ipaddress<line_sep>self.assertIsNotNone(vm1_ip "New nic did not get the ip address")<line_sep># Create network n3 self.network3=Network.create(self.api_client self.services["isolated_network"] self.account.name self.account.domainid networkofferingid=self.isolated_network_offering.id)<line_sep>self.cleanup.append(self.network3)<line_sep>self.vm2=VirtualMachine.create(self.api_client self.services["virtual_machine"] accountid=self.account.name domainid=self.account.domainid serviceofferingid=self.service_offering.id networkids=[self.network3.id] ipaddress=vm1_ip mode=self.zone.networktype)<line_sep>self.cleanup.append(self.vm2)<line_sep>vm2=VirtualMachine.list(self.api_client id=self.vm2.id)<line_sep>self.assertEqual(validateList(vm2)[0] PASS "list vms returned invalid response")<line_sep>self.assertIsNotNone(vm2[0].nic[0].ipaddress "vm2 didn't get the ip address")<line_sep>self.assertEqual(vm1_ip vm2[0].nic[0].ipaddress "vm2 did not get the ip address passed while deploying vm")<line_sep>ip_address=PublicIPAddress.create(self.apiclient self.account.name self.zone.id self.account.domainid self.services["virtual_machine"] self.network3.id)<line_sep>self.cleanup.append(ip_address)<line_sep># Open up firewall port for SSH FireWallRule.create(self.apiclient ipaddressid=ip_address.ipaddress.id protocol=self.services["natrule"]["protocol"] cidrlist=['0.0.0.0/0'] startport=self.services["natrule"]["publicport"] endport=self.services["natrule"]["publicport"])<line_sep># Create NAT rule nat_rule=NATRule.create(self.apiclient self.vm2 self.services["natrule"] ip_address.ipaddress.id)<line_sep>list_nat_rule_response=list_nat_rules(self.apiclient id=nat_rule.id)<line_sep>
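# NOTE: the public IP / firewall / port-forwarding setup above is scaffolding to reproduce the original defect: with vm1 and vm2 holding the same guest ip in different networks, removing vm1's second nic used to be rejected. The assertions below first sanity-check the PF rule, then exercise the actual fix.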
self.assertEqual(validateList(list_nat_rule_response)[0] PASS "Check list response returns a valid list")<line_sep>self.assertEqual(list_nat_rule_response[0].id nat_rule.id "Check Correct Port forwarding Rule is returned")<line_sep># Try to remove nic 2 from vm1 <try_stmt><block_start>vm1.remove_nic(self.apiclient self.nics[0].id)<line_sep>vm1_res=VirtualMachine.list(self.apiclient id=vm1.id)<line_sep>self.assertEqual(validateList(vm1_res)[0] PASS "invalid listvm response")<line_sep>self.assertEqual(len(vm1_res[0].nic) 1 "VM has more than one nic even after removing the 2nd nic")<block_end><except_stmt>Exception<as>e<block_start>self.fail("Failed to delete the nic from vm: %s"%e)<block_end><return><block_end>@attr(tags=["advanced"] required_hardware="true")<def_stmt>test_30_remove_nic_reattach self<block_start>""" Test to verify vm start after NIC removal and reattach # 1.Create vm which has 3 nics(e.g. #0,#1,#2) # 2.Stop the vm # 3.Remove second nic(#1) # 4.Add/Reattach same network(#1) # 5.Start the instance """<line_sep>self.ntwk2=Network.create(self.apiclient self.services["isolated_network"] self.account.name self.account.domainid networkofferingid=self.isolated_network_offering.id)<line_sep>self.cleanup.append(self.ntwk2)<line_sep>self.ntwk3=Network.create(self.apiclient self.services["isolated_network"] self.account.name self.account.domainid networkofferingid=self.isolated_network_offering.id)<line_sep>self.cleanup.append(self.ntwk3)<line_sep>self.test_vm=VirtualMachine.create(self.apiclient self.services["virtual_machine"] accountid=self.account.name domainid=self.account.domainid serviceofferingid=self.service_offering.id mode=self.zone.networktype networkids=[self.isolated_network.id self.ntwk2.id self.ntwk3.id])<line_sep>self.cleanup.append(self.test_vm)<line_sep>self.assertIsNotNone(self.test_vm "Failed to create vm with 3 nics")<line_sep>vm_res=VirtualMachine.list(self.apiclient id=self.test_vm.id)<line_sep>self.assertEqual(validateList(vm_res)[0] PASS "Invalid list vm response")<line_sep>self.nics=vm_res[0].nic<line_sep>self.assertEqual(validateList(self.nics)[0] PASS "vm response does not contain nics info")<line_sep>self.assertEqual(len(self.nics) 3 "Not all nics found in vm response")<line_sep>self.test_vm.stop(self.apiclient)<line_sep>vm_res2=VirtualMachine.list(self.apiclient id=self.test_vm.id)<line_sep>self.assertEqual(validateList(vm_res2)[0] PASS "Invalid response")<line_sep>self.assertEqual(vm_res2[0].state "Stopped" "VM did not stop properly")<line_sep>""" get the network id of the nic which we remove from the vm, so that we can use that network id for reattach """<line_sep>nic_to_attach=[x<for>x [self.isolated_network self.ntwk2 self.ntwk3]<if>x.id<eq>self.nics[1].networkid]<line_sep>self.assertEqual(validateList(nic_to_attach)[0] PASS "No matching nics")<line_sep>self.assertEqual(len(nic_to_attach) 1 "More than one nic in same network")<try_stmt><block_start>self.test_vm.remove_nic(self.apiclient nicId=self.nics[1].id)<line_sep>self.test_vm.add_nic(self.apiclient nic_to_attach[0].id)<line_sep>self.test_vm.start(self.apiclient)<block_end><except_stmt>Exception<as>e<block_start>self.fail("Failed to start vm after nic removal and attachment: %s"%e)<block_end>vm_res3=VirtualMachine.list(self.apiclient id=self.test_vm.id)<line_sep>self.assertEqual(validateList(vm_res3)[0] PASS "Invalid listvm response after nic detach and attach")<line_sep>self.assertEqual(vm_res3[0].state "Running" "VM didn't come to running state after nic detach and
attach")<line_sep>vm_nics=vm_res3[0].nic<line_sep>self.assertEqual(validateList(vm_nics)[0] PASS "Invalid nics after vm stop/start")<line_sep>self.assertEqual(len(vm_nics) 3 "Nic is not attached/detected")<line_sep>self.addednics.extend(vm_nics)<line_sep><return><block_end><block_end><class_stmt>TestUpdateVirtualMachineNIC(cloudstackTestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.testClient=super(TestUpdateVirtualMachineNIC cls).getClsTestClient()<line_sep>cls.api_client=cls.testClient.getApiClient()<line_sep>cls.services=Services().services<line_sep>hypervisor=get_hypervisor_type(cls.api_client)<if_stmt>hypervisor.lower()<not><in>["xenserver" "kvm"]<block_start><raise>unittest.SkipTest("This feature is supported only on XenServer and KVM")<block_end># Get Zone, Domain and templates cls.domain=get_domain(cls.api_client)<line_sep>cls.zone=get_zone(cls.api_client cls.testClient.getZoneForTests())<line_sep>template=get_template(cls.api_client cls.zone.id cls.services["ostype"])<line_sep># Set Zones and disk offerings cls.services["virtual_machine"]["zoneid"]=cls.zone.id<line_sep>cls.services["virtual_machine"]["template"]=template.id<line_sep># Create Accounts & networks cls.services["isolated_network"]["zoneid"]=cls.zone.id<line_sep>cls.services["shared_network"]["zoneid"]=cls.zone.id<line_sep>cls._cleanup=[]<line_sep>cls.account=Account.create(cls.api_client cls.services["account"] domainid=cls.domain.id)<line_sep>cls._cleanup.append(cls.account)<line_sep>cls.service_offering=ServiceOffering.create(cls.api_client cls.services["service_offering"])<line_sep>cls._cleanup.append(cls.service_offering)<line_sep>cls.virtual_machine=VirtualMachine.create(cls.api_client cls.services["virtual_machine"] accountid=cls.account.name domainid=cls.account.domainid serviceofferingid=cls.service_offering.id mode=cls.zone.networktype)<line_sep>cls._cleanup.append(cls.virtual_machine)<line_sep>cls.isolated_network_offering=NetworkOffering.create(cls.api_client cls.services["isolated_network_offering"])<line_sep>cls._cleanup.append(cls.isolated_network_offering)<line_sep>cls.isolated_network_offering.update(cls.api_client state='Enabled')<line_sep>cls.isolated_network=Network.create(cls.api_client cls.services["isolated_network"] cls.account.name cls.account.domainid networkofferingid=cls.isolated_network_offering.id)<line_sep>cls._cleanup.append(cls.isolated_network)<line_sep><return><block_end><def_stmt>setUp self<block_start>self.apiclient=self.testClient.getApiClient()<line_sep>self.dbclient=self.testClient.getDbConnection()<line_sep>self.cleanup=[]<line_sep>self.addednics=[]<block_end><def_stmt>tearDown self<block_start><try_stmt><block_start><for_stmt>nic self.addednics<block_start>self.virtual_machine.remove_nic(self.apiclient nic.id)<block_end><block_end><except_stmt>Exception<as>e<block_start>self.debug("Exception during removal of nics : %s"%e)<block_end>super(TestUpdateVirtualMachineNIC self).tearDown()<block_end>@classmethod<def_stmt>tearDownClass cls<block_start><try_stmt><block_start>cls.isolated_network_offering.update(cls.api_client state='Disabled')<block_end><except_stmt>Exception<as>e<block_start>cls.debug("Exception during disable of network offering : %s"%e)<block_end>super(TestUpdateVirtualMachineNIC cls).tearDownClass()<block_end><def_stmt>addNetworkToVm self network vm<block_start>"""Add network to VM and check if new nic added in the VM"""<line_sep>self.debug("Adding %s Network: %s to virtual machine %s"%(network.type network.id 
vm.add_nic(self.apiclient network.id)<line_sep>vm_list=list_virtual_machines(self.apiclient id=vm.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep>self.debug("virtual machine nics: %s"%vm_list[0].nic)<line_sep># Add nic of network to list so that it can be deleted later accessing its id from this list self.nics=[x<for>x vm_list[0].nic<if>x.networkid<eq>network.id]<line_sep>self.debug("Filtered nics list: %s:"%self.nics)<line_sep>self.assertTrue(len(self.nics)<eq>1 "nics list should contain the nic of added isolated network,\ the number of nics for the network should be 1, instead they are %s"%len(self.nics))<line_sep>self.addednics.append(self.nics[0])<line_sep><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_11_update_nic_running_vm self<block_start>"""update default nic of running VM"""<line_sep># 1. Deploy Vm in account # 2. Add network to VM # 3. Update default nic of VM (Make the newly added NIC as default) # Validate the following: # 1. Default nic is updated # 2. Previous default nic is now non-default # 3. Event NIC.UPDATE is generated self.addNetworkToVm(self.isolated_network self.virtual_machine)<line_sep>self.debug("Listing virtual machine so as to retrieve the list of non-default and default nic")<line_sep>vm_list=list_virtual_machines(self.apiclient id=self.virtual_machine.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<if_stmt>len(vm_list[0].nic)<ne>2<block_start>self.fail("VM should have exactly two NICs")<block_end>defaultNicIdBeforeUpdate=<none><line_sep>nonDefaultNicIdBeforeUpdate=<none><for_stmt>nic vm_list[0].nic<block_start><if_stmt>nic.isdefault<block_start>defaultNicIdBeforeUpdate=nic.id<block_end><else_stmt><block_start>nonDefaultNicIdBeforeUpdate=nic.id<block_end><block_end>self.debug("Default nic of VM is %s and non default nic of VM is %s"%(defaultNicIdBeforeUpdate nonDefaultNicIdBeforeUpdate))<line_sep>self.debug("Making non default nic as default nic")<line_sep>self.virtual_machine.update_default_nic(self.apiclient nicId=nonDefaultNicIdBeforeUpdate)<line_sep>self.debug("Again listing the NIC list of VM to verify the update operation was successful")<line_sep>vm_list=list_virtual_machines(self.apiclient id=self.virtual_machine.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<if_stmt>len(vm_list[0].nic)<ne>2<block_start>self.fail("VM should have exactly two NICs")<block_end>defaultNicIdAfterUpdate=<none><for_stmt>nic vm_list[0].nic<block_start><if_stmt>nic.isdefault<block_start>defaultNicIdAfterUpdate=nic.id<block_end><block_end>self.assertEqual(nonDefaultNicIdBeforeUpdate defaultNicIdAfterUpdate "old non default NIC not made\ default one, update_default_nic API failed")<line_sep>self.debug("Retrieving events list matching events 'NIC.UPDATE'")<line_sep>events=list_events(self.apiclient account=self.account.name domainid=self.account.domainid type='NIC.UPDATE')<line_sep>event_list_validation_result=validateList(events)<line_sep>self.assertEqual(event_list_validation_result[0] PASS "event list validation failed due to %s"%event_list_validation_result[2])<line_sep>self.debug("Events list contains event
NIC.UPDATE")<line_sep>self.debug("events: %s"%events)<line_sep><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_12_make_default_nic_as_default self<block_start>"""Try to set default nic of vm again as default"""<line_sep># 1. Deploy Vm in account # 2. Set default nic of vm again as default # Validate the following: # 1. updateDefaultNic API fails self.debug("Listing virtual machine to get default nic")<line_sep>vm_list=list_virtual_machines(self.apiclient id=self.virtual_machine.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep>defaultNicId=<none><for_stmt>nic vm_list[0].nic<block_start><if_stmt>nic.isdefault<block_start>defaultNicId=nic.id<block_end><block_end>self.debug("Trying to set default nic again as default nic, This should fail")<with_stmt>self.assertRaises(Exception)<as>e<block_start>self.virtual_machine.update_default_nic(self.apiclient nicId=defaultNicId)<line_sep>self.debug("updateDefaultNic operation failed as expected with exception: %s"%e.exception)<block_end><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_13_set_foreign_nic_as_default self<block_start>"""set nic which does not belong to VM as its default one"""<line_sep># 1. Add VM in an account # 1. Add new account and deploy vm in it # 2. Try to set nic of the new vm as default nic of first vm # Validate the following: # 1. updateDefaultNic operation should fail self.debug("Creating new account")<line_sep>account=Account.create(self.api_client self.services["account"] domainid=self.domain.id)<line_sep>self.cleanup.append(account)<line_sep>self.debug("created new account : %s"%account.name)<line_sep>self.debug("Deploying virtual machine in this account")<line_sep>virtual_machine=VirtualMachine.create(self.apiclient self.services["virtual_machine"] accountid=account.name domainid=account.domainid serviceofferingid=self.service_offering.id mode=self.zone.networktype)<line_sep>self.cleanup.append(virtual_machine)<line_sep>time.sleep(self.services["sleep"])<line_sep>self.debug("Deployed virtual machine: %s"%virtual_machine.id)<line_sep>foreignNicId=virtual_machine.nic[0].id<line_sep>self.debug("Trying to set nic of new virtual machine as default nic of existing virtual machine, This \ operation should fail")<with_stmt>self.assertRaises(Exception)<as>e<block_start>self.virtual_machine.update_default_nic(self.apiclient nicId=foreignNicId)<line_sep>self.debug("updateDefaultNic operation failed as expected with exception: %s"%e.exception)<block_end><return><block_end><block_end><class_stmt>TestFailureScenariosAddNetworkToVM(cloudstackTestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.testClient=super(TestFailureScenariosAddNetworkToVM cls).getClsTestClient()<line_sep>cls.api_client=cls.testClient.getApiClient()<line_sep>cls.services=Services().services<line_sep>hypervisor=get_hypervisor_type(cls.api_client)<if_stmt>hypervisor.lower()<not><in>["xenserver" "kvm"]<block_start><raise>unittest.SkipTest("This feature is supported only on XenServer and KVM")<block_end># Get Zone, Domain and templates cls.domain=get_domain(cls.api_client)<line_sep>cls.zone=get_zone(cls.api_client cls.testClient.getZoneForTests())<line_sep>template=get_template(cls.api_client cls.zone.id cls.services["ostype"])<line_sep># Set Zones and disk offerings 
cls.services["virtual_machine"]["zoneid"]=cls.zone.id<line_sep>cls.services["virtual_machine"]["template"]=template.id<line_sep># Create Accounts & networks cls.services["isolated_network"]["zoneid"]=cls.zone.id<line_sep>cls._cleanup=[]<line_sep>cls.account=Account.create(cls.api_client cls.services["account"] domainid=cls.domain.id)<line_sep>cls._cleanup.append(cls.account)<line_sep>cls.service_offering=ServiceOffering.create(cls.api_client cls.services["service_offering"])<line_sep>cls._cleanup.append(cls.service_offering)<line_sep>cls.virtual_machine=VirtualMachine.create(cls.api_client cls.services["virtual_machine"] accountid=cls.account.name domainid=cls.account.domainid serviceofferingid=cls.service_offering.id mode=cls.zone.networktype)<line_sep>cls._cleanup.append(cls.virtual_machine)<line_sep>cls.isolated_network_offering=NetworkOffering.create(cls.api_client cls.services["isolated_network_offering"] )<line_sep>cls._cleanup.append(cls.isolated_network_offering)<line_sep>cls.isolated_network_offering.update(cls.api_client state='Enabled')<line_sep>cls.isolated_network=Network.create(cls.api_client cls.services["isolated_network"] cls.account.name cls.account.domainid networkofferingid=cls.isolated_network_offering.id)<line_sep>cls._cleanup.append(cls.isolated_network)<line_sep><return><block_end><def_stmt>setUp self<block_start>self.apiclient=self.testClient.getApiClient()<line_sep>self.dbclient=self.testClient.getDbConnection()<line_sep>self.cleanup=[]<block_end><def_stmt>tearDown self<block_start>super(TestFailureScenariosAddNetworkToVM self).tearDown()<block_end>@classmethod<def_stmt>tearDownClass cls<block_start><try_stmt><block_start>cls.isolated_network_offering.update(cls.api_client state='Disabled')<block_end><except_stmt>Exception<as>e<block_start>cls.debug("Exception during disabling network offering : %s"%e)<block_end>super(TestFailureScenariosAddNetworkToVM cls).tearDownClass()<block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_15_add_nic_wrong_vm_id self<block_start>"""Add network to vm with wrong vm id"""<line_sep># 1. Call add network to VM API with correct network id but wrong vm id # Validate the following: # 1. API should throw exception saying unable to find virtual machine cmd=addNicToVirtualMachine.addNicToVirtualMachineCmd()<line_sep>cmd.virtualmachineid=random_gen(id="virtual_machine" size=30)<line_sep>cmd.networkid=self.isolated_network.id<with_stmt>self.assertRaises(Exception)<as>e<block_start>self.apiclient.addNicToVirtualMachine(cmd)<line_sep>self.debug("addNicToVirtualMachine API failed with exception: %s"%e.exception)<block_end><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_16_add_nic_wrong_network_id self<block_start>"""Add network to vm with wrong network id"""<line_sep># 1. Call add network to VM API with correct network id but wrong network id # Validate the following: # 1. API should throw exception saying unable to find a network cmd=addNicToVirtualMachine.addNicToVirtualMachineCmd()<line_sep>cmd.virtualmachineid=self.virtual_machine.id<line_sep>cmd.networkid=random_gen(id="network_id" size=30)<with_stmt>self.assertRaises(Exception)<as>e<block_start>self.apiclient.addNicToVirtualMachine(cmd)<line_sep>self.debug("addNicToVirtualMachine API failed with exception: %s"%e.exception)<block_end><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_17_add_nic_different_zone self<block_start>"""Add network to vm where both belong to different zones"""<line_sep># 1. Deploy a VM in zone 1 # 2. Create a network in zone 2 # 3. 
Try to add this network to the VM (both belong to different zones) # Validate the following: # 1. API should throw exception vminstance is in zone<id>, but network is in zone <id> foreignZoneId=<none><line_sep>zones=list_zones(self.apiclient available=<true>)<line_sep>list_zones_validation_result=validateList(zones)<line_sep>self.assertEqual(list_zones_validation_result[0] PASS "list zones validation failed due to: %s"%list_zones_validation_result[2])<if_stmt>len(zones)<ge>2<block_start><for_stmt>zone zones<block_start><if_stmt>zone.id<ne>self.zone.id<block_start>foreignZoneId=zone.id<line_sep><break><block_end><block_end><else_stmt><block_start>self.skipTest("This test requires at least two zones to be present in the setup")<block_end>self.services["isolated_network"]["zoneid"]=foreignZoneId<block_end>self.debug("Creating isolated network in zone %s which is foreign to VM"%foreignZoneId)<line_sep>isolated_network=Network.create(self.apiclient self.services["isolated_network"] self.account.name self.account.domainid networkofferingid=self.isolated_network_offering.id)<line_sep>self.cleanup.append(isolated_network)<line_sep>self.debug("Created isolated network %s in zone %s"%(isolated_network.id foreignZoneId))<line_sep>self.debug("Trying to add network to VM, both belonging to different zones")<line_sep>cmd=addNicToVirtualMachine.addNicToVirtualMachineCmd()<line_sep>cmd.virtualmachineid=self.virtual_machine.id<line_sep>cmd.networkid=isolated_network.id<with_stmt>self.assertRaises(Exception)<as>e<block_start>time.sleep(5)<line_sep>self.apiclient.addNicToVirtualMachine(cmd)<line_sep>self.debug("addNicToVirtualMachine API failed with exception: %s"%e.exception)<block_end><return><block_end>@attr(tags=["invalid"])<def_stmt>test_18_add_nic_basic_zone self<block_start>"""Add network to vm in basic zone"""<line_sep># 1. Deploy a vm and create network in basic zone # 2. Try adding network to vm # Validate following # 1. 
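API should throw exception saying Can't add a new nic to vm in basic network
# NOTE: in a basic zone a vm gets its single nic from the zone's default shared guest network, so addNicToVirtualMachine is expected to be rejected there; the "invalid" tag above presumably reflects that the standard advanced-zone test matrix cannot run this case.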
basicZone=<none><line_sep>zones=list_zones(self.apiclient available=<true>)<line_sep>list_zones_validation_result=validateList(zones)<line_sep>self.assertEqual(list_zones_validation_result[0] PASS "list zones validation failed due to: %s"%list_zones_validation_result[2])<for_stmt>zone zones<block_start><if_stmt>zone.networktype.lower()<eq>'basic'<block_start>basicZone=zone<line_sep><break><block_end><block_end><if_stmt>basicZone<is><none><block_start>self.skipTest("This test requires at least one basic zone to be present in the setup")<block_end>self.services["isolated_network"]["zoneid"]=basicZone.id<line_sep>self.debug("Creating isolated network in basic zone: %s"%basicZone.id)<line_sep>isolated_network=Network.create(self.apiclient self.services["isolated_network"] networkofferingid=self.isolated_network_offering.id)<line_sep>self.cleanup.append(isolated_network)<line_sep>self.debug("Created isolated network %s:"%isolated_network.id)<line_sep>self.services["virtual_machine"]["zoneid"]=basicZone.id<line_sep>self.debug("Deploying virtual machine in basic zone: %s"%basicZone.id)<line_sep>virtual_machine=VirtualMachine.create(self.apiclient self.services["virtual_machine"] serviceofferingid=self.service_offering.id mode=basicZone.networktype)<line_sep>self.cleanup.append(virtual_machine)<line_sep>time.sleep(self.services["sleep"])<line_sep>self.debug("Deployed virtual machine %s: "%virtual_machine.id)<line_sep>cmd=addNicToVirtualMachine.addNicToVirtualMachineCmd()<line_sep>cmd.virtualmachineid=virtual_machine.id<line_sep>cmd.networkid=isolated_network.id<line_sep>self.debug("Trying to add isolated network to VM (both in basic zone),\ this operation should fail")<with_stmt>self.assertRaises(Exception)<as>e<block_start>time.sleep(5)<line_sep>self.apiclient.addNicToVirtualMachine(cmd)<block_end><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_26_add_nic_insufficient_permission self<block_start>"""Try to add network to vm with insufficient permission"""<line_sep># 1. Call add network to VM API with api client of other account # Validate the following: # 1.
API should throw exception saying insufficient permission cmd=addNicToVirtualMachine.addNicToVirtualMachineCmd()<line_sep>cmd.virtualmachineid=self.virtual_machine.id<line_sep>cmd.networkid=self.isolated_network.id<line_sep>self.debug("Creating new account")<line_sep>account=Account.create(self.apiclient self.services["account"] domainid=self.domain.id)<line_sep>self.cleanup.append(account)<line_sep>self.debug("Created account %s"%account.name)<line_sep>self.debug("creating user api client for account: %s"%account.name)<line_sep>api_client=self.testClient.getUserApiClient(UserName=account.name DomainName=self.account.domain)<line_sep>self.debug("Trying to add network to vm with this api client, this should fail due to \ insufficient permission")<with_stmt>self.assertRaises(Exception)<as>e<block_start>time.sleep(5)<line_sep>api_client.addNicToVirtualMachine(cmd)<block_end><return><block_end><block_end><class_stmt>TestFailureScenariosRemoveNicFromVM(cloudstackTestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.testClient=super(TestFailureScenariosRemoveNicFromVM cls).getClsTestClient()<line_sep>cls.api_client=cls.testClient.getApiClient()<line_sep>cls.services=Services().services<line_sep>hypervisor=get_hypervisor_type(cls.api_client)<if_stmt>hypervisor.lower()<not><in>["xenserver" "kvm"]<block_start><raise>unittest.SkipTest("This feature is supported only on XenServer and KVM")<block_end># Get Zone, Domain and templates cls.domain=get_domain(cls.api_client)<line_sep>cls.zone=get_zone(cls.api_client cls.testClient.getZoneForTests())<line_sep>template=get_template(cls.api_client cls.zone.id cls.services["ostype"])<line_sep># Set Zones and disk offerings cls.services["virtual_machine"]["zoneid"]=cls.zone.id<line_sep>cls.services["virtual_machine"]["template"]=template.id<line_sep># Create Accounts & networks cls.services["isolated_network"]["zoneid"]=cls.zone.id<line_sep>cls.services["shared_network"]["zoneid"]=cls.zone.id<line_sep>cls._cleanup=[]<line_sep>cls.account=Account.create(cls.api_client cls.services["account"] domainid=cls.domain.id)<line_sep>cls._cleanup.append(cls.account)<line_sep>cls.service_offering=ServiceOffering.create(cls.api_client cls.services["service_offering"])<line_sep>cls._cleanup.append(cls.service_offering)<line_sep>cls.virtual_machine=VirtualMachine.create(cls.api_client cls.services["virtual_machine"] accountid=cls.account.name domainid=cls.account.domainid serviceofferingid=cls.service_offering.id mode=cls.zone.networktype)<line_sep>cls._cleanup.append(cls.virtual_machine)<line_sep>cls.isolated_network_offering=NetworkOffering.create(cls.api_client cls.services["isolated_network_offering"] )<line_sep>cls._cleanup.append(cls.isolated_network_offering)<line_sep>cls.isolated_network_offering.update(cls.api_client state='Enabled')<line_sep>cls.isolated_network=Network.create(cls.api_client cls.services["isolated_network"] cls.account.name cls.account.domainid networkofferingid=cls.isolated_network_offering.id)<line_sep>cls._cleanup.append(cls.isolated_network)<line_sep>cls.virtual_machine.add_nic(cls.api_client cls.isolated_network.id)<line_sep><return><block_end><def_stmt>setUp self<block_start>self.apiclient=self.testClient.getApiClient()<line_sep>self.dbclient=self.testClient.getDbConnection()<line_sep>self.cleanup=[]<block_end><def_stmt>tearDown self<block_start>super(TestFailureScenariosRemoveNicFromVM self).tearDown()<block_end>@classmethod<def_stmt>tearDownClass 
cls<block_start><try_stmt><block_start>cls.isolated_network_offering.update(cls.api_client state='Disabled')<block_end><except_stmt>Exception<as>e<block_start>cls.debug("Exception during disabling of network offering : %s"%e)<block_end>super(TestFailureScenariosRemoveNicFromVM cls).tearDownClass()<block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_19_remove_nic_wrong_vm_id self<block_start>"""Try to remove nic from a vm providing wrong vm id to API"""<line_sep># (First two steps are performed in setUpClass) # 1. Deploy Vm in account # 2. Add network to VM # 3. Remove the nic added by the newly added network providing wrong vm id to the API # Validate the following: # 1. API throws exception unable to find a virtual machine with id vm_list=list_virtual_machines(self.apiclient id=self.virtual_machine.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep>vm=vm_list_validation_result[1]<line_sep>nics=[x<for>x vm.nic<if>x.networkid<eq>self.isolated_network.id]<line_sep>self.assertEqual(len(nics) 1 "There should be exactly one nic corresponding to the isolated\ network %s"%self.isolated_network.id)<line_sep>cmd=removeNicFromVirtualMachine.removeNicFromVirtualMachineCmd()<line_sep>cmd.virtualmachineid=self.virtual_machine.id+random_gen()<line_sep>cmd.nicid=nics[0].id<with_stmt>self.assertRaises(Exception)<as>e<block_start>self.apiclient.removeNicFromVirtualMachine(cmd)<line_sep>self.debug("removeNicFromVirtualMachine API failed with exception: %s"%e.exception)<block_end><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_20_remove_nic_wrong_nic_id self<block_start>"""Try to remove nic from a vm providing wrong nic id to API"""<line_sep># (First two steps are performed in setUpClass) # 1. Deploy Vm in account # 2. Add network to VM # 3. Remove the nic added by the newly added network providing wrong nic id to the API # Validate the following: # 1. API throws exception unable to find nic with id vm_list=list_virtual_machines(self.apiclient id=self.virtual_machine.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep>vm=vm_list_validation_result[1]<line_sep>nics=[x<for>x vm.nic<if>x.networkid<eq>self.isolated_network.id]<line_sep>self.assertEqual(len(nics) 1 "There should be exactly one nic corresponding to the isolated\ network %s"%self.isolated_network.id)<line_sep>cmd=removeNicFromVirtualMachine.removeNicFromVirtualMachineCmd()<line_sep>cmd.virtualmachineid=self.virtual_machine.id<line_sep>cmd.nicid=nics[0].id+random_gen()<with_stmt>self.assertRaises(Exception)<as>e<block_start>self.apiclient.removeNicFromVirtualMachine(cmd)<line_sep>self.debug("removeNicFromVirtualMachine API failed with exception: %s"%e.exception)<block_end><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_27_remove_nic_insufficient_permission self<block_start>"""Try to remove nic from vm with insufficient permission"""<line_sep># 1. Call remove network from VM API with api client of other account # Validate the following: # 1.
API should throw exception saying insufficient permission vm_list=list_virtual_machines(self.apiclient id=self.virtual_machine.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep>vm=vm_list_validation_result[1]<line_sep>nics=[x<for>x vm.nic<if>x.networkid<eq>self.isolated_network.id]<line_sep>self.assertEqual(len(nics) 1 "There should be exactly one nic corresponding to the isolated\ network %s"%self.isolated_network.id)<line_sep>cmd=removeNicFromVirtualMachine.removeNicFromVirtualMachineCmd()<line_sep>cmd.virtualmachineid=self.virtual_machine.id<line_sep>cmd.nicid=nics[0].id<line_sep>self.debug("Creating new account")<line_sep>account=Account.create(self.apiclient self.services["account"] domainid=self.domain.id)<line_sep>self.cleanup.append(account)<line_sep>self.debug("Created account %s"%account.name)<line_sep>self.debug("creating user api client for account: %s"%account.name)<line_sep>api_client=self.testClient.getUserApiClient(UserName=account.name DomainName=self.account.domain)<line_sep>self.debug("Trying to remove nic from vm with this api client, this should fail due to \ insufficient permission")<with_stmt>self.assertRaises(Exception)<as>e<block_start>api_client.removeNicFromVirtualMachine(cmd)<line_sep>self.debug("removeNicFromVirtualMachine API failed with exception: %s"%e.exception)<block_end>self.apiclient.removeNicFromVirtualMachine(cmd)<line_sep><return><block_end><block_end><class_stmt>TestFailureScenariosUpdateVirtualMachineNIC(cloudstackTestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.testClient=super(TestFailureScenariosUpdateVirtualMachineNIC cls).getClsTestClient()<line_sep>cls.api_client=cls.testClient.getApiClient()<line_sep>cls.services=Services().services<line_sep>hypervisor=get_hypervisor_type(cls.api_client)<if_stmt>hypervisor.lower()<not><in>["xenserver" "kvm"]<block_start><raise>unittest.SkipTest("This feature is supported only on XenServer and KVM")<block_end># Get Zone, Domain and templates cls.domain=get_domain(cls.api_client)<line_sep>cls.zone=get_zone(cls.api_client cls.testClient.getZoneForTests())<line_sep>template=get_template(cls.api_client cls.zone.id cls.services["ostype"])<line_sep># Set Zones and disk offerings cls.services["virtual_machine"]["zoneid"]=cls.zone.id<line_sep>cls.services["virtual_machine"]["template"]=template.id<line_sep># Create Accounts & networks cls.services["isolated_network"]["zoneid"]=cls.zone.id<line_sep>cls.services["shared_network"]["zoneid"]=cls.zone.id<line_sep>cls._cleanup=[]<line_sep>cls.addednics=[]<line_sep>cls.account=Account.create(cls.api_client cls.services["account"] domainid=cls.domain.id)<line_sep>cls._cleanup.append(cls.account)<line_sep>cls.service_offering=ServiceOffering.create(cls.api_client cls.services["service_offering"])<line_sep>cls._cleanup.append(cls.service_offering)<line_sep>cls.virtual_machine=VirtualMachine.create(cls.api_client cls.services["virtual_machine"] accountid=cls.account.name domainid=cls.account.domainid serviceofferingid=cls.service_offering.id mode=cls.zone.networktype)<line_sep>cls._cleanup.append(cls.virtual_machine)<line_sep>cls.defaultNetworkId=cls.virtual_machine.nic[0].networkid<line_sep># Create Isolated Network Offering cls.isolated_network_offering=NetworkOffering.create(cls.api_client cls.services["isolated_network_offering"] )<line_sep>cls._cleanup.append(cls.isolated_network_offering)<line_sep># Enable
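Isolated Network offering
# NOTE: NetworkOffering.create leaves a new offering in the Disabled state, so the update to state='Enabled' below is required before any network can be created from it; tearDownClass mirrors this by disabling the offering again.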
Isolated Network offering cls.isolated_network_offering.update(cls.api_client state='Enabled')<line_sep>cls.isolated_network=Network.create(cls.api_client cls.services["isolated_network"] cls.account.name cls.account.domainid networkofferingid=cls.isolated_network_offering.id)<line_sep>cls._cleanup.append(cls.isolated_network)<line_sep>vm_with_nic=cls.virtual_machine.add_nic(cls.api_client cls.isolated_network.id)<line_sep>nics=[x<for>x vm_with_nic.nic<if>x.networkid<eq>cls.isolated_network.id]<line_sep>cls.addednics.append(nics[-1])<line_sep><return><block_end><def_stmt>setUp self<block_start>self.apiclient=self.testClient.getApiClient()<line_sep>self.dbclient=self.testClient.getDbConnection()<line_sep>self.cleanup=[]<block_end><def_stmt>tearDown self<block_start>super(TestFailureScenariosUpdateVirtualMachineNIC self).tearDown()<block_end>@classmethod<def_stmt>tearDownClass cls<block_start><try_stmt><block_start><for_stmt>nic cls.addednics<block_start>cls.virtual_machine.remove_nic(cls.api_client nic.id)<block_end><block_end><except_stmt>Exception<as>e<block_start>cls.debug("Exception during removal of nics : %s"%e)<block_end><try_stmt><block_start>cls.isolated_network_offering.update(cls.api_client state='Disabled')<block_end><except_stmt>Exception<as>e<block_start>cls.debug("Exception during disabling of network offering : %s"%e)<block_end>super(TestFailureScenariosUpdateVirtualMachineNIC cls).tearDownClass()<block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_21_update_nic_wrong_vm_id self<block_start>"""update default nic of vm providing wrong vm id to the API"""<line_sep># (First two steps are performed in setUpClass) # 1. Deploy Vm in account # 2. Add network to VM # 3. Update default nic of VM (Make the newly added NIC as default) by providing wrong # vm id to the API # Validate the following: # 1. API throws exception saying can't find the virtual machine self.debug("Listing virtual machine to retrieve the list of non-default and default nics")<line_sep>vm_list=list_virtual_machines(self.apiclient id=self.virtual_machine.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep><if_stmt>len(vm_list[0].nic)<ne>2<block_start>self.fail("VM should have exactly two NICs")<block_end>defaultNicIdBeforeUpdate=<none><line_sep>nonDefaultNicIdBeforeUpdate=<none><line_sep><for_stmt>nic vm_list[0].nic<block_start><if_stmt>nic.isdefault<block_start>defaultNicIdBeforeUpdate=nic.id<block_end><else_stmt><block_start>nonDefaultNicIdBeforeUpdate=nic.id<block_end><block_end>self.debug("Default nic of VM is %s and non default nic of VM is %s"%(defaultNicIdBeforeUpdate nonDefaultNicIdBeforeUpdate))<line_sep>self.debug("Making non default nic as default nic")<line_sep>cmd=updateDefaultNicForVirtualMachine.updateDefaultNicForVirtualMachineCmd()<line_sep>cmd.virtualmachineid=self.virtual_machine.id+random_gen()<line_sep>cmd.nicid=nonDefaultNicIdBeforeUpdate<with_stmt>self.assertRaises(Exception)<as>e<block_start>self.apiclient.updateDefaultNicForVirtualMachine(cmd)<block_end>self.debug("updateDefaultNicForVirtualMachine API failed with exception: %s"%e.exception)<line_sep><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_22_update_nic_wrong_nic_id self<block_start>"""update default nic of vm providing wrong nic id to the API"""<line_sep># (First two steps are performed in setUpClass) # 1. Deploy Vm in account # 2. Add network to VM # 3.
Update default nic of VM (Make the newly added NIC as default) by providing wrong # nic id to the API # Validate the following: # 1. API throws exception saying can't find the nic with id self.debug("Listing virtual machine to retrieve the list of non-default and default nics")<line_sep>vm_list=list_virtual_machines(self.apiclient id=self.virtual_machine.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep><if_stmt>len(vm_list[0].nic)<ne>2<block_start>self.fail("VM should have exactly two NICs")<block_end>defaultNicIdBeforeUpdate=<none><line_sep>nonDefaultNicIdBeforeUpdate=<none><line_sep><for_stmt>nic vm_list[0].nic<block_start><if_stmt>nic.isdefault<block_start>defaultNicIdBeforeUpdate=nic.id<block_end><else_stmt><block_start>nonDefaultNicIdBeforeUpdate=nic.id<block_end><block_end>self.debug("Default nic of VM is %s and non default nic of VM is %s"%(defaultNicIdBeforeUpdate nonDefaultNicIdBeforeUpdate))<line_sep>self.debug("Making non default nic as default nic")<line_sep>cmd=updateDefaultNicForVirtualMachine.updateDefaultNicForVirtualMachineCmd()<line_sep>cmd.virtualmachineid=self.virtual_machine.id<line_sep>cmd.nicid=nonDefaultNicIdBeforeUpdate+random_gen()<with_stmt>self.assertRaises(Exception)<as>e<block_start>self.apiclient.updateDefaultNicForVirtualMachine(cmd)<block_end>self.debug("updateDefaultNicForVirtualMachine API failed with exception: %s"%e.exception)<line_sep><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_23_update_nic_incorrect_vm_state self<block_start>"""update default nic of vm when vm state is not Running or Stopped"""<line_sep># (First two steps are performed in setUpClass) # 1. Deploy Vm in account # 2. Add network to VM # 3. Destroy virtual machine so that the VM state becomes Destroyed or Expunging # 4. Update default nic of VM (Make the newly added NIC as default) # Validate the following: # 1.
API throws exception saying the instance is not Running or Stopped self.debug("Creating new account")<line_sep>account=Account.create(self.apiclient self.services["account"] domainid=self.domain.id)<line_sep>self.cleanup.append(account)<line_sep>self.debug("Creating virtual machine in the account %s"%account.name)<line_sep>virtual_machine=VirtualMachine.create(self.api_client self.services["virtual_machine"] accountid=account.name domainid=account.domainid serviceofferingid=self.service_offering.id mode=self.zone.networktype)<line_sep>time.sleep(self.services["sleep"])<line_sep>self.debug("Created virtual machine %s"%virtual_machine.id)<line_sep>self.debug("Creating isolated network in account %s"%account.name)<line_sep>isolated_network=Network.create(self.apiclient self.services["isolated_network"] account.name account.domainid networkofferingid=self.isolated_network_offering.id)<line_sep>self.debug("Created isolated network %s"%isolated_network.id)<line_sep>self.debug("Adding isolated network %s to vm %s"%(isolated_network.id virtual_machine.id))<line_sep>virtual_machine.add_nic(self.apiclient isolated_network.id)<line_sep>self.debug("Listing virtual machine to retrieve the list of non-default and default nics")<line_sep>vm_list=list_virtual_machines(self.apiclient id=virtual_machine.id listall=<true>)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep><if_stmt>len(vm_list[0].nic)<ne>2<block_start>self.fail("VM should have exactly two NICs")<block_end>defaultNicIdBeforeUpdate=<none><line_sep>nonDefaultNicIdBeforeUpdate=<none><line_sep><for_stmt>nic vm_list[0].nic<block_start><if_stmt>nic.isdefault<block_start>defaultNicIdBeforeUpdate=nic.id<block_end><else_stmt><block_start>nonDefaultNicIdBeforeUpdate=nic.id<block_end><block_end>self.debug("Default nic of VM is %s and non default nic of VM is %s"%(defaultNicIdBeforeUpdate nonDefaultNicIdBeforeUpdate))<line_sep>self.debug("Destroying VM %s"%virtual_machine.id)<line_sep>virtual_machine.delete(self.apiclient expunge=<false>)<line_sep>self.debug("Making non default nic as default nic")<line_sep>cmd=updateDefaultNicForVirtualMachine.updateDefaultNicForVirtualMachineCmd()<line_sep>cmd.virtualmachineid=virtual_machine.id<line_sep>cmd.nicid=nonDefaultNicIdBeforeUpdate<with_stmt>self.assertRaises(Exception)<as>e<block_start>self.apiclient.updateDefaultNicForVirtualMachine(cmd)<block_end>self.debug("updateDefaultNicForVirtualMachine API failed with exception: %s"%e.exception)<line_sep><return><block_end>@attr(tags=["advanced" "dvs"])<def_stmt>test_28_update_nic_insufficient_permission self<block_start>"""Try to update default nic of vm with insufficient permission"""<line_sep># 1. Call update nic of VM API with api client of other account # Validate the following: # 1.
API should throw exception saying insufficient permission account=Account.create(self.apiclient self.services["account"] domainid=self.domain.id)<line_sep>self.cleanup.append(account)<line_sep>self.debug("Created account %s"%account.name)<line_sep>self.debug("creating user api client for account: %s"%account.name)<line_sep>api_client=self.testClient.getUserApiClient(UserName=account.name DomainName=self.account.domain)<line_sep>self.debug("Listing virtual machine to retrieve the list of non-default and default nics")<line_sep>vm_list=list_virtual_machines(self.apiclient id=self.virtual_machine.id)<line_sep>vm_list_validation_result=validateList(vm_list)<line_sep>self.assertEqual(vm_list_validation_result[0] PASS "vm list validation failed due to %s"%vm_list_validation_result[2])<line_sep><if_stmt>len(vm_list[0].nic)<ne>2<block_start>self.fail("VM should have exactly two NICs")<block_end>defaultNicIdBeforeUpdate=<none><line_sep>nonDefaultNicIdBeforeUpdate=<none><line_sep><for_stmt>nic vm_list[0].nic<block_start><if_stmt>nic.isdefault<block_start>defaultNicIdBeforeUpdate=nic.id<block_end><else_stmt><block_start>nonDefaultNicIdBeforeUpdate=nic.id<block_end><block_end>self.debug("Default nic of VM is %s and non default nic of VM is %s"%(defaultNicIdBeforeUpdate nonDefaultNicIdBeforeUpdate))<line_sep>self.debug("Making non default nic as default nic")<line_sep>cmd=updateDefaultNicForVirtualMachine.updateDefaultNicForVirtualMachineCmd()<line_sep>cmd.virtualmachineid=self.virtual_machine.id<line_sep>cmd.nicid=nonDefaultNicIdBeforeUpdate<with_stmt>self.assertRaises(Exception)<as>e<block_start>api_client.updateDefaultNicForVirtualMachine(cmd)<block_end><return><block_end><block_end>
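# A minimal, self-contained sketch of the assertRaises pattern used throughout
# the permission tests above (hypothetical ExampleClient, not part of the
# suite): statements placed after the raising call inside the `with` block
# never run, which is why the debug logging belongs after the block.
import unittest

class AssertRaisesPatternExample(unittest.TestCase):
    def test_insufficient_permission_pattern(self):
        class ExampleClient(object):
            def removeNic(self, cmd):
                raise Exception("insufficient permission")
        client = ExampleClient()
        with self.assertRaises(Exception) as cm:
            client.removeNic(cmd=None)
        # Inspect the captured exception only after leaving the block.
        self.assertIn("insufficient permission", str(cm.exception))

if __name__ == "__main__":
    unittest.main()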
#Declare variables to hold the file name and access mode fileName="GuestList.txt"<line_sep>accessMode="w"<line_sep>#Open the file for writing myFile=open(fileName accessMode)<line_sep>#Write the guest names and ages to the file #I can write an entire record in one write statement myFile.write("<NAME>,27\n")<line_sep>myFile.write("<NAME>,25\n")<line_sep>myFile.write("<NAME>,32\n")<line_sep>#I could write the name and age in separate write statements myFile.write("<NAME>")<line_sep>myFile.write(",36\n")<line_sep>myFile.write("<NAME>")<line_sep>myFile.write(",26\n")<line_sep>#Close the file myFile.close()<line_sep>
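# A minimal companion sketch for the example above: reading GuestList.txt back
# and splitting each record into its name and age fields.
with open("GuestList.txt", "r") as guest_file:
    for record in guest_file:
        name, age = record.rstrip("\n").split(",")
        print(name, "is", age, "years old")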
# Copyright (c) 2012-2019 Seafile Ltd. <import_stmt>logging<import_from_stmt>rest_framework status<import_from_stmt>rest_framework.views APIView<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework.authentication SessionAuthentication<import_from_stmt>django.utils.translation ugettext<as>_<import_from_stmt>seaserv ccnet_api seafile_api<import_from_stmt>seahub.api2.permissions IsProVersion IsOrgAdminUser<import_from_stmt>seahub.api2.throttling UserRateThrottle<import_from_stmt>seahub.api2.authentication TokenAuthentication<import_from_stmt>seahub.api2.utils api_error<import_from_stmt>seahub.base.accounts User<import_from_stmt>seahub.base.templatetags.seahub_tags email2nickname email2contact_email<import_from_stmt>seahub.utils.timeutils timestamp_to_isoformat_timestr<import_from_stmt>seahub.utils.repo normalize_repo_status_code<import_from_stmt>seahub.api2.endpoints.group_owned_libraries get_group_id_by_repo_owner<import_from_stmt>seahub.group.utils group_id_to_name<line_sep>logger=logging.getLogger(__name__)<class_stmt>OrgAdminUserRepos(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>throttle_classes=(UserRateThrottle )<line_sep>permission_classes=(IsProVersion IsOrgAdminUser)<def_stmt>get self request org_id email<block_start>"""Org admin list user owned repos """<line_sep># resource check org_id=int(org_id)<if_stmt><not>ccnet_api.get_org_by_id(org_id)<block_start>error_msg='Organization %s not found.'%org_id<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end><try_stmt><block_start>user=User.objects.get(email=email)<block_end><except_stmt>User.DoesNotExist<block_start>err_msg='User %s not found.'%email<line_sep><return>api_error(status.HTTP_404_NOT_FOUND err_msg)<block_end># permission check <if_stmt><not>ccnet_api.org_user_exists(org_id email)<block_start>err_msg=_('User %s not found in organization.')%email<line_sep><return>api_error(status.HTTP_404_NOT_FOUND err_msg)<block_end># list repos repo_info_list=list()<line_sep>owned_repos=seafile_api.get_org_owned_repo_list(org_id email)<for_stmt>r owned_repos# do not return virtual repos <block_start><if_stmt>r.is_virtual<block_start><continue><block_end>repo_info={"repo_id":r.id "repo_name":r.name "owner_email":email "owner_name":email2nickname(email) "owner_contact_email":email2contact_email(email) "last_modified":timestamp_to_isoformat_timestr(r.last_modify) "modifier_email":r.last_modifier "size":r.size "encrypted":r.encrypted "permission":'rw' # Always have read-write permission to owned repo "status":normalize_repo_status_code(r.status) }<line_sep>repo_info_list.append(repo_info)<block_end><return>Response({'repo_list':repo_info_list})<block_end><block_end><class_stmt>OrgAdminUserBesharedRepos(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>throttle_classes=(UserRateThrottle )<line_sep>permission_classes=(IsProVersion IsOrgAdminUser)<def_stmt>get self request org_id email<block_start>"""Org admin list repos by shared to user """<line_sep># resource check org_id=int(org_id)<if_stmt><not>ccnet_api.get_org_by_id(org_id)<block_start>error_msg='Organization %s not found.'%org_id<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end><try_stmt><block_start>user=User.objects.get(email=email)<block_end><except_stmt>User.DoesNotExist<block_start>err_msg='User %s not found.'%email<line_sep><return>api_error(status.HTTP_404_NOT_FOUND err_msg)<block_end># permission check 
<if_stmt><not>ccnet_api.org_user_exists(org_id email)<block_start>err_msg=_('User %s not found in organization.')%email<line_sep><return>api_error(status.HTTP_404_NOT_FOUND err_msg)<block_end># list beshared repos repo_info_list=list()<line_sep>beshared_repos=seafile_api.get_org_share_in_repo_list(org_id email -1 -1)<for_stmt>r beshared_repos<block_start>owner_email=r.user<line_sep>group_name=''<line_sep>is_group_owned_repo=<false><if_stmt>'@seafile_group'<in>owner_email<block_start>is_group_owned_repo=<true><line_sep>group_id=get_group_id_by_repo_owner(owner_email)<line_sep>group_name=group_id_to_name(group_id)<block_end>owner_name=group_name<if>is_group_owned_repo<else>email2nickname(owner_email)<line_sep>owner_contact_email=''<if>is_group_owned_repo<else>email2contact_email(owner_email)<line_sep>repo_info={"repo_id":r.repo_id "repo_name":r.repo_name "last_modified":timestamp_to_isoformat_timestr(r.last_modify) "modifier_email":r.last_modifier "owner_email":owner_email "owner_name":owner_name "owner_contact_email":owner_contact_email "size":r.size "encrypted":r.encrypted "permission":r.permission "status":normalize_repo_status_code(r.status) }<line_sep>repo_info_list.append(repo_info)<block_end><return>Response({'repo_list':repo_info_list})<block_end><block_end>
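# A small standalone sketch of the owner-resolution branch above. The helper
# parameters mirror the imports used by the views; the int(...) prefix parsing
# is an assumption about how get_group_id_by_repo_owner decodes the address
# upstream.
def resolve_owner_display(owner_email, group_id_to_name, email2nickname):
    if '@seafile_group' in owner_email:
        # Group-owned repos use a synthetic "<group-id>@seafile_group" address,
        # so the display name comes from the group, not a user profile.
        group_id = int(owner_email.split('@', 1)[0])  # assumed encoding
        return group_id_to_name(group_id)
    return email2nickname(owner_email)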
<import_stmt>logging<import_from_stmt>logutils.colorize ColorizingStreamHandler<class_stmt>DefaultColorizer(ColorizingStreamHandler)<block_start>level_map={logging.DEBUG:(<none> 'blue' <true>) logging.INFO:(<none> <none> <true>) logging.WARNING:(<none> 'yellow' <true>) logging.ERROR:(<none> 'red' <true>) logging.CRITICAL:(<none> 'red' <true>) }<block_end><class_stmt>ColorFormatter(logging.Formatter)<block_start>""" A very basic logging formatter that not only applies color to the levels of the output but can also add padding to the level names so that they do not alter the visuals of logging when presented on the terminal. The padding is provided by a convenient keyword that adds padding to the ``levelname`` so that log output is easier to follow:: %(padded_color_levelname)s Which would result in log level output that looks like:: [INFO ] [WARNING ] [ERROR ] [DEBUG ] [CRITICAL] If colored output is not supported, it falls back to non-colored output without any extra settings. """<def_stmt>__init__ self _logging=<none> colorizer=<none> *a **kw<block_start>self.logging=_logging<or>logging<line_sep>self.color=colorizer<or>DefaultColorizer()<line_sep>logging.Formatter.__init__(self *a **kw)<block_end><def_stmt>format self record<block_start>levelname=record.levelname<line_sep>padded_level='%-8s'%levelname<line_sep>record.color_levelname=self.color.colorize(levelname record)<line_sep>record.padded_color_levelname=self.color.colorize(padded_level record)<line_sep><return>self.logging.Formatter.format(self record)<block_end><block_end>
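# A minimal usage sketch (assumes a terminal with ANSI color support): attach
# the formatter to the colorizing handler so the padded level-name keyword
# described in the docstring is available in the format string.
import logging

handler = DefaultColorizer()
handler.setFormatter(ColorFormatter(fmt='[%(padded_color_levelname)s] %(message)s'))
log = logging.getLogger('color_example')
log.addHandler(handler)
log.setLevel(logging.DEBUG)
log.warning('this level name is colored and padded to 8 characters')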
# Copyright 2016 <NAME>, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>six<import_stmt>numbers<import_stmt>token<import_from_stmt>. util<line_sep># Mapping of matching braces. To find a token here, look up token[:2]. _matching_pairs_left={(token.OP '('):(token.OP ')') (token.OP '['):(token.OP ']') (token.OP '{'):(token.OP '}') }<line_sep>_matching_pairs_right={(token.OP ')'):(token.OP '(') (token.OP ']'):(token.OP '[') (token.OP '}'):(token.OP '{') }<class_stmt>MarkTokens(object)<block_start>""" Helper that visits all nodes in the AST tree and assigns .first_token and .last_token attributes to each of them. This is the heart of the token-marking logic. """<def_stmt>__init__ self code<block_start>self._code=code<line_sep>self._methods=util.NodeMethods()<line_sep>self._iter_children=<none><block_end><def_stmt>visit_tree self node<block_start>self._iter_children=util.iter_children_func(node)<line_sep>util.visit_tree(node self._visit_before_children self._visit_after_children)<block_end><def_stmt>_visit_before_children self node parent_token<block_start>col=getattr(node 'col_offset' <none>)<line_sep>token=self._code.get_token_from_utf8(node.lineno col)<if>col<is><not><none><else><none><if_stmt><not>token<and>util.is_module(node)# We'll assume that a Module node starts at the start of the source code. <block_start>token=self._code.get_token(1 0)<block_end># Use our own token, or our parent's if we don't have one, to pass to child calls as # parent_token argument. The second value becomes the token argument of _visit_after_children. <return>(token<or>parent_token token)<block_end><def_stmt>_visit_after_children self node parent_token token# This processes the node generically first, after all children have been processed. # Get the first and last tokens that belong to children. Note how this doesn't assume that we # iterate through children in order that corresponds to occurrence in source code. This # assumption can fail (e.g. with return annotations). <block_start>first=token<line_sep>last=<none><for_stmt>child self._iter_children(node)<block_start><if_stmt><not>first<or>child.first_token.index<l>first.index<block_start>first=child.first_token<block_end><if_stmt><not>last<or>child.last_token.index<g>last.index<block_start>last=child.last_token<block_end><block_end># If we don't have a first token from _visit_before_children, and there were no children, then # use the parent's token as the first token. first=first<or>parent_token<line_sep># If no children, set last token to the first one. last=last<or>first<line_sep># Statements continue to before NEWLINE. This helps cover a few different cases at once. <if_stmt>util.is_stmt(node)<block_start>last=self._find_last_in_line(last)<block_end># Capture any unmatched brackets. first,last=self._expand_to_matching_pairs(first last node)<line_sep># Give a chance to node-specific methods to adjust. 
nfirst,nlast=self._methods.get(self node.__class__)(node first last)<if_stmt>(nfirst nlast)<ne>(first last)# If anything changed, expand again to capture any unmatched brackets. <block_start>nfirst,nlast=self._expand_to_matching_pairs(nfirst nlast node)<block_end>node.first_token=nfirst<line_sep>node.last_token=nlast<block_end><def_stmt>_find_last_in_line self start_token<block_start><try_stmt><block_start>newline=self._code.find_token(start_token token.NEWLINE)<block_end><except_stmt>IndexError<block_start>newline=self._code.find_token(start_token token.ENDMARKER)<block_end><return>self._code.prev_token(newline)<block_end><def_stmt>_iter_non_child_tokens self first_token last_token node<block_start>""" Generates all tokens in [first_token, last_token] range that do not belong to any children of node. E.g. `foo(bar)` has children `foo` and `bar`, but we would yield the `(`. """<line_sep>tok=first_token<for_stmt>n self._iter_children(node)<block_start><for_stmt>t self._code.token_range(tok self._code.prev_token(n.first_token))<block_start><yield>t<block_end><if_stmt>n.last_token.index<ge>last_token.index<block_start><return><block_end>tok=self._code.next_token(n.last_token)<block_end><for_stmt>t self._code.token_range(tok last_token)<block_start><yield>t<block_end><block_end><def_stmt>_expand_to_matching_pairs self first_token last_token node<block_start>""" Scan tokens in [first_token, last_token] range that are between node's children, and for any unmatched brackets, adjust first/last tokens to include the closing pair. """<line_sep># We look for opening parens/braces among non-child tokens (i.e. tokens between our actual # child nodes). If we find any closing ones, we match them to the opens. to_match_right=[]<line_sep>to_match_left=[]<for_stmt>tok self._iter_non_child_tokens(first_token last_token node)<block_start>tok_info=tok[:2]<if_stmt>to_match_right<and>tok_info<eq>to_match_right[-1]<block_start>to_match_right.pop()<block_end><elif_stmt>tok_info<in>_matching_pairs_left<block_start>to_match_right.append(_matching_pairs_left[tok_info])<block_end><elif_stmt>tok_info<in>_matching_pairs_right<block_start>to_match_left.append(_matching_pairs_right[tok_info])<block_end><block_end># Once done, extend `last_token` to match any unclosed parens/braces. <for_stmt>match reversed(to_match_right)<block_start>last=self._code.next_token(last_token)<line_sep># Allow for a trailing comma before the closing delimiter. <if_stmt>util.match_token(last token.OP ',')<block_start>last=self._code.next_token(last)<block_end># Now check for the actual closing delimiter. <if_stmt>util.match_token(last *match)<block_start>last_token=last<block_end><block_end># And extend `first_token` to match any unclosed opening parens/braces. <for_stmt>match to_match_left<block_start>first=self._code.prev_token(first_token)<if_stmt>util.match_token(first *match)<block_start>first_token=first<block_end><block_end><return>(first_token last_token)<block_end>#---------------------------------------------------------------------- # Node visitors. Each takes a preliminary first and last tokens, and returns the adjusted pair # that will actually be assigned. <def_stmt>visit_default self node first_token last_token# pylint: disable=no-self-use # By default, we don't need to adjust the token we computed earlier. 
<block_start><return>(first_token last_token)<block_end><def_stmt>handle_comp self open_brace node first_token last_token# For list/set/dict comprehensions, we only get the token of the first child, so adjust it to # include the opening brace (the closing brace will be matched automatically). <block_start>before=self._code.prev_token(first_token)<line_sep>util.expect_token(before token.OP open_brace)<line_sep><return>(before last_token)<block_end><def_stmt>visit_listcomp self node first_token last_token<block_start><return>self.handle_comp('[' node first_token last_token)<block_end><if_stmt>six.PY2# We shouldn't do this on PY3 because its SetComp/DictComp already have a correct start. <block_start><def_stmt>visit_setcomp self node first_token last_token<block_start><return>self.handle_comp('{' node first_token last_token)<block_end><def_stmt>visit_dictcomp self node first_token last_token<block_start><return>self.handle_comp('{' node first_token last_token)<block_end><block_end><def_stmt>visit_comprehension self node first_token last_token# The 'comprehension' node starts with 'for' but we only get first child; we search backwards # to find the 'for' keyword. <block_start>first=self._code.find_token(first_token token.NAME 'for' reverse=<true>)<line_sep><return>(first last_token)<block_end><def_stmt>handle_attr self node first_token last_token# Attribute node has ".attr" (2 tokens) after the last child. <block_start>dot=self._code.find_token(last_token token.OP '.')<line_sep>name=self._code.next_token(dot)<line_sep>util.expect_token(name token.NAME)<line_sep><return>(first_token name)<block_end>visit_attribute=handle_attr<line_sep>visit_assignattr=handle_attr<line_sep>visit_delattr=handle_attr<def_stmt>handle_doc self node first_token last_token# With astroid, nodes that start with a doc-string can have an empty body, in which case we # need to adjust the last token to include the doc string. <block_start><if_stmt><not>node.body<and>getattr(node 'doc' <none>)<block_start>last_token=self._code.find_token(last_token token.STRING)<block_end><return>(first_token last_token)<block_end>visit_classdef=handle_doc<line_sep>visit_funcdef=handle_doc<def_stmt>visit_call self node first_token last_token# A function call isn't over until we see a closing paren. Remember that last_token is at the # end of all children, so we are not worried about encountering a paren that belongs to a # child. <block_start><return>(first_token self._code.find_token(last_token token.OP ')'))<block_end><def_stmt>visit_subscript self node first_token last_token# A subscript operation isn't over until we see a closing bracket. Similar to function calls. <block_start><return>(first_token self._code.find_token(last_token token.OP ']'))<block_end><def_stmt>visit_tuple self node first_token last_token# A tuple doesn't include parens; if there is a trailing comma, make it part of the tuple. <block_start><try_stmt><block_start>maybe_comma=self._code.next_token(last_token)<if_stmt>util.match_token(maybe_comma token.OP ',')<block_start>last_token=maybe_comma<block_end><block_end><except_stmt>IndexError<block_start><pass><block_end><return>(first_token last_token)<block_end><def_stmt>visit_str self node first_token last_token<block_start><return>self.handle_str(first_token last_token)<block_end><def_stmt>visit_joinedstr self node first_token last_token<block_start><return>self.handle_str(first_token last_token)<block_end><def_stmt>handle_str self first_token last_token# Multiple adjacent STRING tokens form a single string.
<block_start>last=self._code.next_token(last_token)<while_stmt>util.match_token(last token.STRING)<block_start>last_token=last<line_sep>last=self._code.next_token(last_token)<block_end><return>(first_token last_token)<block_end><def_stmt>visit_num self node first_token last_token# A constant like '-1' gets turned into two tokens; this will skip the '-'. <block_start><while_stmt>util.match_token(last_token token.OP)<block_start>last_token=self._code.next_token(last_token)<block_end><return>(first_token last_token)<block_end># In Astroid, the Num and Str nodes are replaced by Const. <def_stmt>visit_const self node first_token last_token<block_start><if_stmt>isinstance(node.value numbers.Number)<block_start><return>self.visit_num(node first_token last_token)<block_end><elif_stmt>isinstance(node.value six.string_types)<block_start><return>self.visit_str(node first_token last_token)<block_end><return>(first_token last_token)<block_end><def_stmt>visit_keyword self node first_token last_token<block_start><if_stmt>node.arg<is><not><none><block_start>equals=self._code.find_token(first_token token.OP '=' reverse=<true>)<line_sep>name=self._code.prev_token(equals)<line_sep>util.expect_token(name token.NAME node.arg)<line_sep>first_token=name<block_end><return>(first_token last_token)<block_end><def_stmt>visit_starred self node first_token last_token# Astroid has 'Starred' nodes (for "foo(*bar)" type args), but they need to be adjusted. <block_start><if_stmt><not>util.match_token(first_token token.OP '*')<block_start>star=self._code.prev_token(first_token)<if_stmt>util.match_token(star token.OP '*')<block_start>first_token=star<block_end><block_end><return>(first_token last_token)<block_end><def_stmt>visit_assignname self node first_token last_token# Astroid may turn 'except' clause into AssignName, but we need to adjust it. <block_start><if_stmt>util.match_token(first_token token.NAME 'except')<block_start>colon=self._code.find_token(last_token token.OP ':')<line_sep>first_token=last_token=self._code.prev_token(colon)<block_end><return>(first_token last_token)<block_end><if_stmt>six.PY2# No need for this on Python3, which already handles 'with' nodes correctly. <block_start><def_stmt>visit_with self node first_token last_token<block_start>first=self._code.find_token(first_token token.NAME 'with' reverse=<true>)<line_sep><return>(first last_token)<block_end><block_end><block_end>
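# An isolated sketch of the two-stack bracket matching that
# _expand_to_matching_pairs applies above, using plain characters in place of
# (token_type, token_string) pairs.
def unmatched_brackets(chars):
    pairs = {'(': ')', '[': ']', '{': '}'}
    opens = {close: open_ for open_, close in pairs.items()}
    to_match_right = []  # closers we still expect to the right
    to_match_left = []   # openers we still expect to the left
    for ch in chars:
        if to_match_right and ch == to_match_right[-1]:
            to_match_right.pop()              # closes the most recent opener
        elif ch in pairs:
            to_match_right.append(pairs[ch])  # opener: remember its closer
        elif ch in opens:
            to_match_left.append(opens[ch])   # stray closer: its opener lies left
    return to_match_left, to_match_right

# e.g. unmatched_brackets("(a[b]") == ([], [')'])
#      unmatched_brackets("a]b)")  == (['[', '('], [])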
# Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>argparse<import_stmt>itertools<import_stmt>json<import_stmt>logging<import_stmt>os<import_stmt>subprocess<import_stmt>sys<import_stmt>tempfile<import_stmt>zipfile<line_sep>CACHE_DIR="buck-cache"<class_stmt>CacheEntry(object)<block_start><pass><block_end><def_stmt>get_cache_entry path<block_start><with_stmt>zipfile.ZipFile(path)<as>f<block_start>entry_map={os.path.basename(n):n<for>n f.namelist()}<line_sep>entry=CacheEntry()<line_sep>entry.target=f.read(entry_map["TARGET"]).strip()<line_sep>entry.rule_key=f.read(entry_map["RULE_KEY"]).strip()<line_sep>entry.deps=json.loads(f.read(entry_map["DEPS"]))<line_sep>entry.path=path<line_sep><return>entry<block_end><block_end><def_stmt>get_cache_inventory <block_start>inventory={}<for_stmt>item os.listdir(CACHE_DIR)<block_start>entry=get_cache_entry(os.path.join(CACHE_DIR item))<line_sep>inventory[entry.target]=entry<block_end><return>inventory<block_end><def_stmt>get_missing_cache_entries inventory<block_start>""" Find and return all entries missing in the cache. """<line_sep>missing_entries={}<for_stmt>entry inventory.itervalues()<block_start><if_stmt><not>os.path.exists(entry.path)<block_start>missing_entries[entry.target]=entry<block_end><block_end><return>missing_entries<block_end><def_stmt>clear_cache <block_start>subprocess.check_call(["rm" "-rf" CACHE_DIR])<block_end><def_stmt>clear_output <block_start>subprocess.check_call(["rm" "-rf" "buck-out"])<block_end><def_stmt>run_buck buck *args<block_start>logging.info("Running {} {}".format(buck " ".join(args)))<line_sep># Always create a temp file, in case we need to serialize the # arguments to it. <with_stmt>tempfile.NamedTemporaryFile()<as>f# Point cache to a known location. <block_start># args arrives as a tuple from *args; copy it to a list before mutating. args=list(args)<line_sep>args.append("--config")<line_sep>args.append("cache.dir="+CACHE_DIR)<line_sep># If the command would be too long, put the args into a file and # execute that. <if_stmt>len(args)<g>30<block_start><for_stmt>arg args<block_start>f.write(arg)<line_sep>f.write(os.linesep)<block_end>f.flush()<line_sep>args=["@"+f.name]<block_end><return>subprocess.check_output([buck]+list(args))<block_end><block_end><def_stmt>preorder_traversal roots deps callback<block_start>""" Execute the given callback during a preorder traversal of the graph. """<line_sep># Keep track of all the nodes processed. seen=set()<def_stmt>traverse node callback chain# Make sure we only visit nodes once. <block_start><if_stmt>node<in>seen<block_start><return><block_end>seen.add(node)<line_sep># Run the callback with the current node and the chain of parent nodes we # traversed to find it. callback(node chain)<line_sep># Recurse on dependencies, making sure to update the visitor chain. <for_stmt>dep deps[node]<block_start>traverse(dep callback chain=chain+[node])<block_end><block_end># Traverse starting from all the roots.
<for_stmt>root roots<block_start>traverse(root callback [])<block_end><block_end><def_stmt>build buck targets<block_start>""" Verify that each of the actions that run when building the given targets runs correctly using a top-down build. """<line_sep># Now run a build to populate the cache. logging.info("Running a build to populate the cache")<line_sep>run_buck(buck "build" *targets)<line_sep># Find all targets reachable via the UI. out=run_buck(buck "audit" "dependencies" "--transitive" *targets)<line_sep>ui_targets=set(out.splitlines())<line_sep>ui_targets.update(targets)<line_sep># Grab an inventory of the cache and use it to form a dependency map. cache_inventory=get_cache_inventory()<line_sep>dependencies={n.target:n.deps<for>n cache_inventory.itervalues()}<line_sep># Keep track of all the processed nodes so we can print progress info. processed=set()<line_sep># The callback to run for each build rule. <def_stmt>handle current chain<block_start>logging.info("Processing {} ({}/{})".format(current len(processed) len(dependencies.keys())))<line_sep>processed.add(current)<line_sep># Empty the previous build's output. logging.info("Removing output from previous build")<line_sep>clear_output()<line_sep># Remove the cache entry for this target. entry=cache_inventory[current]<line_sep>os.remove(entry.path)<line_sep>logging.info(" removed {} => {}".format(current entry.path))<line_sep># Now run the build using the closest UI visible ancestor target. logging.info("Running the build to check "+current)<for_stmt>node itertools.chain([current] reversed(chain))<block_start><if_stmt>node<in>ui_targets<block_start>run_buck(buck "build" "--just-build" current node)<line_sep><break><block_end><block_end><else_stmt><block_start><assert_stmt><false> "couldn't find target in UI: "+node<block_end># We should *always* end with a full cache. logging.info("Verifying cache...")<line_sep>missing=get_missing_cache_entries(cache_inventory)<assert_stmt>len(missing)<eq>0 "\n".join(sorted(missing.keys()))<block_end>preorder_traversal(targets dependencies handle)<block_end><def_stmt>test buck targets<block_start>""" Test that we can run tests when pulling from the cache. """<line_sep># Find all test targets. test_targets=set()<line_sep>out=run_buck(buck "targets" "--json" *targets)<for_stmt>info json.loads(out)<block_start><if_stmt>info["buck.type"].endswith("_test")<block_start>test_targets.add("//"+info["buck.base_path"]+":"+info["name"])<block_end><block_end><if_stmt><not>test_targets<block_start><raise>Exception("no test targets")<block_end># Now run a build to populate the cache. logging.info("Running a build to populate the cache")<line_sep>run_buck(buck "build" *test_targets)<line_sep># Empty the build output. logging.info("Removing output from build")<line_sep>clear_output()<line_sep># Now run the test run_buck(buck "test" *test_targets)<block_end><def_stmt>main argv<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--buck" default="buck")<line_sep>parser.add_argument("command" choices=("build" "test"))<line_sep>parser.add_argument("targets" metavar="target" nargs="+")<line_sep>args=parser.parse_args(argv[1:])<line_sep>logging.basicConfig(level=logging.INFO format="%(asctime)s %(message)s" datefmt="%m/%d/%Y %I:%M:%S %p" )<line_sep># Resolve any aliases in the top-level targets. out=run_buck(args.buck "targets" *args.targets)<line_sep>targets=set(out.splitlines())<line_sep># Clear the cache and output directories to start with a clean slate.
logging.info("Clearing output and cache")<line_sep>run_buck(args.buck "clean")<line_sep>clear_output()<line_sep>clear_cache()<line_sep># Run the subcommand <if_stmt>args.command<eq>"build"<block_start>build(args.buck targets)<block_end><elif_stmt>args.command<eq>"test"<block_start>test(args.buck targets)<block_end><else_stmt><block_start><raise>Exception("unknown command: "+args.command)<block_end><block_end>sys.exit(main(sys.argv))<line_sep>
<import_stmt>logging<line_sep>logger=logging.getLogger('base')<def_stmt>create_model opt<block_start>model=opt['model']<if_stmt>model<eq>'sr'<block_start><import_from_stmt>.SR_model SRModel<as>M<block_end><elif_stmt>model<eq>'srgan'<block_start><import_from_stmt>.SRGAN_model SRGANModel<as>M<block_end><elif_stmt>model<eq>'srragan'<block_start><import_from_stmt>.SRRaGAN_model SRRaGANModel<as>M<block_end><elif_stmt>model<eq>'sftgan'<block_start><import_from_stmt>.SFTGAN_ACD_model SFTGAN_ACD_Model<as>M<block_end><else_stmt><block_start><raise>NotImplementedError('Model [{:s}] not recognized.'.format(model))<block_end>m=M(opt)<line_sep>logger.info('Model [{:s}] is created.'.format(m.__class__.__name__))<line_sep><return>m<block_end>
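# A minimal usage sketch (hypothetical options dict; a real config carries many
# more keys that the selected model class reads during construction):
#
#     opt = {'model': 'srgan', ...}   # one of: 'sr', 'srgan', 'srragan', 'sftgan'
#     model = create_model(opt)       # logs: Model [SRGANModel] is created.
#
# Unknown model names raise NotImplementedError, so config typos fail fast.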
<import_from_stmt>libsaas http parsers<import_from_stmt>libsaas.services base<class_stmt>FilesResource(base.RESTResource)<block_start>path='files'<def_stmt>create self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>Files(FilesResource)<block_start>@base.apimethod<def_stmt>get self start=<none> limit=<none><block_start>""" Returns data about all files. Upstream documentation: https://developers.pipedrive.com/v1#methods-Files """<line_sep>params=base.get_params(<none> locals())<line_sep><return>http.Request('GET' self.get_url() params) parsers.parse_json<block_end><block_end><class_stmt>File(FilesResource)<block_start><pass><block_end>
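# A minimal usage sketch (hedged: assumes the enclosing Pipedrive service wires
# these resources in the usual libsaas style, e.g. via a files() accessor):
#
#     files_resource = service.files()
#     files_json = files_resource.get(start=0, limit=50)
#
# The @base.apimethod decorator turns the returned (Request, parser) pair into
# an executed HTTP call; create() is disabled here and raises MethodNotSupported.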
# Copyright 2018-2021 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Unit tests for tape expansion stopping criteria and expansion functions. """<import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>pennylane<as>qml<import_from_stmt>pennylane.wires Wires<class_stmt>TestCreateExpandFn<block_start>"""Test creating expansion functions from stopping criteria."""<line_sep>crit_0=(~qml.operation.is_trainable)|(qml.operation.has_gen&qml.operation.is_trainable)<line_sep>doc_0="Test docstring."<with_stmt>qml.tape.QuantumTape()<as>tape<block_start>qml.RX(0.2 wires=0)<line_sep>qml.RY(qml.numpy.array(2.1 requires_grad=<true>) wires=1)<line_sep>qml.Rot(*qml.numpy.array([0.5 0.2 -0.1] requires_grad=<true>) wires=0)<block_end><def_stmt>test_create_expand_fn self<block_start>"""Test creation of expand_fn."""<line_sep>expand_fn=qml.transforms.create_expand_fn(depth=10 stop_at=self.crit_0 docstring=self.doc_0 )<assert_stmt>expand_fn.__doc__<eq>"Test docstring."<block_end><def_stmt>test_create_expand_fn_expansion self<block_start>"""Test expansion with created expand_fn."""<line_sep>expand_fn=qml.transforms.create_expand_fn(depth=10 stop_at=self.crit_0)<line_sep>new_tape=expand_fn(self.tape)<assert_stmt>new_tape.operations[0]<eq>self.tape.operations[0]<assert_stmt>new_tape.operations[1]<eq>self.tape.operations[1]<assert_stmt>[op.name<for>op new_tape.operations[2:]]<eq>["RZ" "RY" "RZ"]<assert_stmt>np.allclose([op.data<for>op new_tape.operations[2:]] [[0.5] [0.2] [-0.1]])<assert_stmt>[op.wires<for>op new_tape.operations[2:]]<eq>[qml.wires.Wires(0)]<times>3<block_end><def_stmt>test_create_expand_fn_dont_expand self<block_start>"""Test expansion is skipped with depth=0."""<line_sep>expand_fn=qml.transforms.create_expand_fn(depth=0 stop_at=self.crit_0)<line_sep>new_tape=expand_fn(self.tape)<assert_stmt>new_tape.operations<eq>self.tape.operations<block_end><def_stmt>test_device_and_stopping_expansion self mocker<block_start>"""Test that passing a device alongside a stopping condition ensures that all operations are expanded to match the devices default gate set"""<line_sep>dev=qml.device("default.qubit" wires=1)<line_sep>expand_fn=qml.transforms.create_expand_fn(device=dev depth=10 stop_at=self.crit_0)<with_stmt>qml.tape.QuantumTape()<as>tape<block_start>qml.U1(0.2 wires=0)<line_sep>qml.Rot(*qml.numpy.array([0.5 0.2 -0.1] requires_grad=<true>) wires=0)<block_end>spy_device=mocker.spy(dev "supports_operation")<line_sep>new_tape=expand_fn(tape)<line_sep>spy_device.assert_called()<assert_stmt>new_tape.operations[0].name<eq>"PhaseShift"<assert_stmt>[op.name<for>op new_tape.operations[1:]]<eq>["RZ" "RY" "RZ"]<block_end><def_stmt>test_device_only_expansion self mocker<block_start>"""Test that passing a device ensures that all operations are expanded to match the devices default gate set"""<line_sep>dev=qml.device("default.qubit" wires=1)<line_sep>expand_fn=qml.transforms.create_expand_fn(device=dev depth=10)<with_stmt>qml.tape.QuantumTape()<as>tape<block_start>qml.U1(0.2 
wires=0)<line_sep>qml.Rot(*qml.numpy.array([0.5 0.2 -0.1] requires_grad=<true>) wires=0)<block_end>spy_device=mocker.spy(dev "supports_operation")<line_sep>new_tape=expand_fn(tape)<line_sep>spy_device.assert_called()<assert_stmt>len(new_tape.operations)<eq>2<assert_stmt>new_tape.operations[0].name<eq>"PhaseShift"<assert_stmt>new_tape.operations[1].name<eq>"Rot"<block_end><def_stmt>test_depth_only_expansion self<block_start>"""Test that passing a depth simply expands to that depth"""<line_sep>dev=qml.device("default.qubit" wires=0)<with_stmt>qml.tape.QuantumTape()<as>tape<block_start>qml.RX(0.2 wires=0)<line_sep>qml.RY(qml.numpy.array(2.1 requires_grad=<true>) wires=1)<line_sep>qml.Rot(*qml.numpy.array([0.5 0.2 -0.1] requires_grad=<true>) wires=0)<line_sep>qml.templates.StronglyEntanglingLayers(qml.numpy.ones([2 2 3] requires_grad=<true>) wires=[0 1])<block_end>expand_fn=qml.transforms.create_expand_fn(depth=0)<line_sep>new_tape=expand_fn(tape)<assert_stmt>new_tape<is>tape<line_sep>expand_fn=qml.transforms.create_expand_fn(depth=10)<line_sep>new_tape=expand_fn(tape)<assert_stmt>new_tape.operations[0]<eq>tape.operations[0]<assert_stmt>new_tape.operations[1]<eq>tape.operations[1]<assert_stmt>[op.name<for>op new_tape.operations[2:5]]<eq>["RZ" "RY" "RZ"]<assert_stmt>len(new_tape.operations[6:])<eq>15<block_end><block_end><class_stmt>TestExpandMultipar<block_start>"""Test the expansion of multi-parameter gates."""<def_stmt>test_expand_multipar self<block_start>"""Test that a multi-parameter gate is decomposed correctly. And that single-parameter gates are not decomposed."""<line_sep>dev=qml.device("default.qubit" wires=3)<class_stmt>_CRX(qml.CRX)<block_start>name="_CRX"<line_sep>@staticmethod<def_stmt>decomposition theta wires<block_start><raise>NotImplementedError()<block_end><block_end><with_stmt>qml.tape.QuantumTape()<as>tape<block_start>qml.RX(1.5 wires=0)<line_sep>qml.Rot(-2.1 0.2 -0.418 wires=1)<line_sep>_CRX(1.5 wires=[0 2])<block_end>new_tape=qml.transforms.expand_multipar(tape)<line_sep>new_ops=new_tape.operations<assert_stmt>[op.name<for>op new_ops]<eq>["RX" "RZ" "RY" "RZ" "_CRX"]<block_end><def_stmt>test_no_generator_expansion self<block_start>"""Test that a gate is decomposed correctly if it has generator[0]==None."""<line_sep>dev=qml.device("default.qubit" wires=3)<class_stmt>_CRX(qml.CRX)<block_start><def_stmt>generator self<block_start><raise>qml.operations.GeneratorUndefinedError()<block_end><block_end><with_stmt>qml.tape.QuantumTape()<as>tape<block_start>qml.RX(1.5 wires=0)<line_sep>qml.RZ(-2.1 wires=1)<line_sep>qml.RY(0.2 wires=1)<line_sep>qml.RZ(-0.418 wires=1)<line_sep>_CRX(1.5 wires=[0 2])<block_end>new_tape=qml.transforms.expand_multipar(tape)<line_sep>new_ops=new_tape.operations<line_sep>expected=["RX" "RZ" "RY" "RZ" "RZ" "RY" "CNOT" "RY" "CNOT" "RZ"]<assert_stmt>[op.name<for>op new_ops]<eq>expected<block_end><block_end><class_stmt>TestExpandNonunitaryGen<block_start>"""Test the expansion of operations without a unitary generator."""<def_stmt>test_do_not_expand self<block_start>"""Test that a tape with single-parameter operations with unitary generators and non-parametric operations is not touched."""<with_stmt>qml.tape.QuantumTape()<as>tape<block_start>qml.RX(0.2 wires=0)<line_sep>qml.Hadamard(0)<line_sep>qml.PauliRot(0.9 "XY" wires=[0 1])<line_sep>qml.SingleExcitationPlus(-1.2 wires=[1 0])<block_end>new_tape=qml.transforms.expand_nonunitary_gen(tape)<assert_stmt>tape.operations<eq>new_tape.operations<block_end><def_stmt>test_expand_multi_par self<block_start>"""Test that 
a tape with multi-parameter operations is expanded correctly, while single-parameter operations with unitary generators and non-parametric operations are left untouched."""<with_stmt>qml.tape.QuantumTape()<as>tape<block_start>qml.RX(0.2 wires=0)<line_sep>qml.Hadamard(0)<line_sep>qml.Rot(0.9 1.2 -0.6 wires=0)<line_sep>qml.SingleExcitationPlus(-1.2 wires=[1 0])<block_end>new_tape=qml.transforms.expand_nonunitary_gen(tape)<line_sep>expanded=[qml.RZ(0.9 wires=0) qml.RY(1.2 wires=0) qml.RZ(-0.6 wires=0) ]<assert_stmt>tape.operations[:2]<eq>new_tape.operations[:2]<assert_stmt>all(exp.name<eq>new.name<for>exp,new zip(expanded new_tape.operations[2:5]))<assert_stmt>all(exp.data<eq>new.data<for>exp,new zip(expanded new_tape.operations[2:5]))<assert_stmt>all(exp.wires<eq>new.wires<for>exp,new zip(expanded new_tape.operations[2:5]))<assert_stmt>tape.operations[3:]<eq>new_tape.operations[5:]<block_end><def_stmt>test_expand_missing_generator self<block_start>"""Test that an operation that does not define a generator is expanded, while the remaining operations are left untouched."""<class_stmt>_PhaseShift(qml.PhaseShift)<block_start><def_stmt>generator self<block_start><return><none><block_end><block_end><with_stmt>qml.tape.QuantumTape()<as>tape<block_start>qml.RX(0.2 wires=0)<line_sep>qml.Hadamard(0)<line_sep>_PhaseShift(2.1 wires=1)<line_sep>qml.SingleExcitationPlus(-1.2 wires=[1 0])<block_end>new_tape=qml.transforms.expand_nonunitary_gen(tape)<assert_stmt>tape.operations[:2]<eq>new_tape.operations[:2]<line_sep>exp_op=new_tape.operations[2]<assert_stmt>exp_op.name<eq>"RZ"<and>exp_op.data<eq>[2.1]<and>exp_op.wires<eq>qml.wires.Wires(1)<assert_stmt>tape.operations[3:]<eq>new_tape.operations[3:]<block_end><def_stmt>test_expand_nonunitary_generator self<block_start>"""Test that an operation with a non-unitary generator is expanded, while the remaining operations are left untouched."""<with_stmt>qml.tape.QuantumTape()<as>tape<block_start>qml.RX(0.2 wires=0)<line_sep>qml.Hadamard(0)<line_sep>qml.PhaseShift(2.1 wires=1)<line_sep>qml.SingleExcitationPlus(-1.2 wires=[1 0])<block_end>new_tape=qml.transforms.expand_nonunitary_gen(tape)<assert_stmt>tape.operations[:2]<eq>new_tape.operations[:2]<line_sep>exp_op=new_tape.operations[2]<assert_stmt>exp_op.name<eq>"RZ"<and>exp_op.data<eq>[2.1]<and>exp_op.wires<eq>qml.wires.Wires(1)<assert_stmt>tape.operations[3:]<eq>new_tape.operations[3:]<block_end><block_end><class_stmt>TestExpandInvalidTrainable<block_start>"""Tests for the gradient expand function"""<def_stmt>test_no_expansion self mocker<block_start>"""Test that a circuit with differentiable operations is not expanded"""<line_sep>x=qml.numpy.array(0.2 requires_grad=<true>)<line_sep>y=qml.numpy.array(0.1 requires_grad=<true>)<with_stmt>qml.tape.QuantumTape()<as>tape<block_start>qml.RX(x wires=0)<line_sep>qml.RY(y wires=1)<line_sep>qml.CNOT(wires=[0 1])<line_sep>qml.expval(qml.PauliZ(0))<block_end>spy=mocker.spy(tape "expand")<line_sep>new_tape=qml.transforms.expand_invalid_trainable(tape)<assert_stmt>new_tape<is>tape<line_sep>spy.assert_not_called()<block_end><def_stmt>test_trainable_nondiff_expansion self mocker<block_start>"""Test that a circuit with non-differentiable trainable operations is expanded"""<line_sep>x=qml.numpy.array(0.2 requires_grad=<true>)<line_sep>y=qml.numpy.array(0.1 requires_grad=<true>)<class_stmt>NonDiffPhaseShift(qml.PhaseShift)<block_start>grad_method=<none><block_end><with_stmt>qml.tape.QuantumTape()<as>tape<block_start>NonDiffPhaseShift(x wires=0)<line_sep>qml.RY(y wires=1)<line_sep>qml.CNOT(wires=[0
1])<line_sep>qml.expval(qml.PauliZ(0))<block_end>spy=mocker.spy(tape "expand")<line_sep>new_tape=qml.transforms.expand_invalid_trainable(tape)<assert_stmt>new_tape<is><not>tape<line_sep>spy.assert_called()<assert_stmt>new_tape.operations[0].name<eq>"RZ"<assert_stmt>new_tape.operations[0].grad_method<eq>"A"<assert_stmt>new_tape.operations[1].name<eq>"RY"<assert_stmt>new_tape.operations[2].name<eq>"CNOT"<block_end><def_stmt>test_nontrainable_nondiff self mocker<block_start>"""Test that a circuit with non-differentiable non-trainable operations is not expanded"""<line_sep>x=qml.numpy.array(0.2 requires_grad=<false>)<line_sep>y=qml.numpy.array(0.1 requires_grad=<true>)<class_stmt>NonDiffPhaseShift(qml.PhaseShift)<block_start>grad_method=<none><block_end><with_stmt>qml.tape.QuantumTape()<as>tape<block_start>NonDiffPhaseShift(x wires=0)<line_sep>qml.RY(y wires=1)<line_sep>qml.CNOT(wires=[0 1])<line_sep>qml.expval(qml.PauliZ(0))<block_end>params=tape.get_parameters(trainable_only=<false>)<line_sep>tape.trainable_params=qml.math.get_trainable_indices(params)<assert_stmt>tape.trainable_params<eq>[1]<line_sep>spy=mocker.spy(tape "expand")<line_sep>new_tape=qml.transforms.expand_invalid_trainable(tape)<assert_stmt>new_tape<is>tape<line_sep>spy.assert_not_called()<block_end><def_stmt>test_trainable_numeric self mocker<block_start>"""Test that a circuit with numeric differentiable trainable operations is *not* expanded"""<line_sep>x=qml.numpy.array(0.2 requires_grad=<true>)<line_sep>y=qml.numpy.array(0.1 requires_grad=<true>)<class_stmt>NonDiffPhaseShift(qml.PhaseShift)<block_start>grad_method="F"<block_end><with_stmt>qml.tape.QuantumTape()<as>tape<block_start>NonDiffPhaseShift(x wires=0)<line_sep>qml.RY(y wires=1)<line_sep>qml.CNOT(wires=[0 1])<line_sep>qml.expval(qml.PauliZ(0))<block_end>spy=mocker.spy(tape "expand")<line_sep>new_tape=qml.transforms.expand_invalid_trainable(tape)<assert_stmt>new_tape<is>tape<line_sep>spy.assert_not_called()<block_end><block_end># Custom decomposition functions for testing.
<def_stmt>custom_cnot wires<block_start><return>[qml.Hadamard(wires=wires[1]) qml.CZ(wires=[wires[0] wires[1]]) qml.Hadamard(wires=wires[1]) ]<block_end><def_stmt>custom_hadamard wires<block_start><return>[qml.RZ(np.pi wires=wires) qml.RY(np.pi/2 wires=wires)]<block_end># Incorrect, for testing purposes only <def_stmt>custom_rx params wires<block_start><return>[qml.RY(params wires=wires) qml.Hadamard(wires=wires)]<block_end># To test the gradient; use circuit identity RY(theta) = X RY(-theta) X <def_stmt>custom_rot phi theta omega wires<block_start><return>[qml.RZ(phi wires=wires) qml.PauliX(wires=wires) qml.RY(-theta wires=wires) qml.PauliX(wires=wires) qml.RZ(omega wires=wires) ]<block_end># Decompose a template into another template <def_stmt>custom_basic_entangler_layers weights wires **kwargs<block_start><return>[qml.AngleEmbedding(weights[0] wires=wires) qml.broadcast(qml.CNOT pattern="ring" wires=wires) ]<block_end><class_stmt>TestCreateCustomDecompExpandFn<block_start>"""Tests for the gradient expand function"""<def_stmt>test_no_custom_decomp self<block_start>"""Test that sending an empty dictionary results in no decompositions."""<def_stmt>circuit <block_start>qml.Hadamard(wires=0)<line_sep>qml.CNOT(wires=[0 1])<line_sep><return>qml.expval(qml.PauliZ(0))<block_end>original_dev=qml.device("default.qubit" wires=3)<line_sep>decomp_dev=qml.device("default.qubit" wires=3 custom_decomps={})<line_sep>original_qnode=qml.QNode(circuit original_dev expansion_strategy="device")<line_sep>decomp_qnode=qml.QNode(circuit decomp_dev expansion_strategy="device")<line_sep>original_res=original_qnode()<line_sep>decomp_res=decomp_qnode()<assert_stmt>np.isclose(original_res decomp_res)<assert_stmt>[orig_op.name<eq>decomp_op.name<for>orig_op,decomp_op zip(original_qnode.qtape.operations decomp_qnode.qtape.operations)]<block_end><def_stmt>test_no_custom_decomp_template self<block_start>"""Test that sending an empty dictionary results in no decomposition when a template is involved, except the decomposition expected from the device."""<def_stmt>circuit <block_start>qml.BasicEntanglerLayers([[0.1 0.2]] wires=[0 1])<line_sep><return>qml.expval(qml.PauliZ(0))<block_end>original_dev=qml.device("default.qubit" wires=3)<line_sep>decomp_dev=qml.device("default.qubit" wires=3 custom_decomps={})<line_sep>original_qnode=qml.QNode(circuit original_dev expansion_strategy="device")<line_sep>decomp_qnode=qml.QNode(circuit decomp_dev expansion_strategy="device")<line_sep>original_res=original_qnode()<line_sep>decomp_res=decomp_qnode()<assert_stmt>np.isclose(original_res decomp_res)<assert_stmt>[orig_op.name<eq>decomp_op.name<for>orig_op,decomp_op zip(original_qnode.qtape.operations decomp_qnode.qtape.operations)]<block_end>@pytest.mark.parametrize("device_name" ["default.qubit" "lightning.qubit"])<def_stmt>test_one_custom_decomp self device_name<block_start>"""Test that specifying a single custom decomposition works as expected."""<def_stmt>circuit <block_start>qml.Hadamard(wires=0)<line_sep>qml.CNOT(wires=[0 1])<line_sep><return>qml.expval(qml.PauliZ(0))<block_end>custom_decomps={"Hadamard":custom_hadamard}<line_sep>decomp_dev=qml.device(device_name wires=2 custom_decomps=custom_decomps)<line_sep>decomp_qnode=qml.QNode(circuit decomp_dev expansion_strategy="device")<line_sep>_=decomp_qnode()<line_sep>decomp_ops=decomp_qnode.qtape.operations<assert_stmt>len(decomp_ops)<eq>3<assert_stmt>decomp_ops[0].name<eq>"RZ"<assert_stmt>np.isclose(decomp_ops[0].parameters[0] 
np.pi)<assert_stmt>decomp_ops[1].name<eq>"RY"<assert_stmt>np.isclose(decomp_ops[1].parameters[0] np.pi/2)<assert_stmt>decomp_ops[2].name<eq>"CNOT"<block_end><def_stmt>test_no_decomp_with_depth_zero self<block_start>"""Test that custom decompositions are not applied when decomp_depth is 0."""<def_stmt>circuit <block_start>qml.Hadamard(wires=0)<line_sep>qml.CNOT(wires=[0 1])<line_sep><return>qml.expval(qml.PauliZ(0))<block_end>custom_decomps={"Hadamard":custom_hadamard "CNOT":custom_cnot}<line_sep>decomp_dev=qml.device("default.qubit" wires=2 custom_decomps=custom_decomps decomp_depth=0)<line_sep>decomp_qnode=qml.QNode(circuit decomp_dev expansion_strategy="device")<line_sep>_=decomp_qnode()<line_sep>decomp_ops=decomp_qnode.qtape.operations<assert_stmt>len(decomp_ops)<eq>2<assert_stmt>decomp_ops[0].name<eq>"Hadamard"<assert_stmt>decomp_ops[1].name<eq>"CNOT"<block_end><def_stmt>test_one_custom_decomp_gradient self<block_start>"""Test that gradients are still correctly computed after a decomposition that performs transpilation."""<def_stmt>circuit x<block_start>qml.Hadamard(wires=0)<line_sep>qml.Rot(x[0] x[1] x[2] wires=0)<line_sep>qml.Hadamard(wires=0)<line_sep><return>qml.expval(qml.PauliZ(0))<block_end>original_dev=qml.device("default.qubit" wires=3)<line_sep>decomp_dev=qml.device("default.qubit" wires=3 custom_decomps={"Rot":custom_rot})<line_sep>original_qnode=qml.QNode(circuit original_dev expansion_strategy="device")<line_sep>decomp_qnode=qml.QNode(circuit decomp_dev expansion_strategy="device")<line_sep>x=qml.numpy.array([0.2 0.3 0.4] requires_grad=<true>)<line_sep>original_res=original_qnode(x)<line_sep>decomp_res=decomp_qnode(x)<assert_stmt>np.allclose(original_res decomp_res)<line_sep>original_grad=qml.grad(original_qnode)(x)<line_sep>decomp_grad=qml.grad(decomp_qnode)(x)<assert_stmt>np.allclose(original_grad decomp_grad)<line_sep>expected_ops=["Hadamard" "RZ" "PauliX" "RY" "PauliX" "RZ" "Hadamard"]<assert_stmt>all([op.name<eq>name<for>op,name zip(decomp_qnode.qtape.operations expected_ops)])<block_end><def_stmt>test_nested_custom_decomp self<block_start>"""Test that specifying two custom decompositions that have interdependence works as expected."""<def_stmt>circuit <block_start>qml.Hadamard(wires=0)<line_sep>qml.CNOT(wires=[0 1])<line_sep><return>qml.expval(qml.PauliZ(0))<block_end>custom_decomps={"Hadamard":custom_hadamard qml.CNOT:custom_cnot}<line_sep>decomp_dev=qml.device("default.qubit" wires=2 custom_decomps=custom_decomps)<line_sep>decomp_qnode=qml.QNode(circuit decomp_dev expansion_strategy="device")<line_sep>_=decomp_qnode()<line_sep>decomp_ops=decomp_qnode.qtape.operations<assert_stmt>len(decomp_ops)<eq>7<line_sep># Check the RZ gates are in the correct place <for_stmt>idx [0 2 5]<block_start><assert_stmt>decomp_ops[idx].name<eq>"RZ"<assert_stmt>np.isclose(decomp_ops[idx].parameters[0] np.pi)<block_end><assert_stmt>decomp_ops[0].wires<eq>Wires(0)<assert_stmt>decomp_ops[2].wires<eq>Wires(1)<assert_stmt>decomp_ops[5].wires<eq>Wires(1)<line_sep># Check RY are in the correct place <for_stmt>idx [1 3 6]<block_start><assert_stmt>decomp_ops[idx].name<eq>"RY"<assert_stmt>np.isclose(decomp_ops[idx].parameters[0] np.pi/2)<block_end><assert_stmt>decomp_ops[1].wires<eq>Wires(0)<assert_stmt>decomp_ops[3].wires<eq>Wires(1)<assert_stmt>decomp_ops[6].wires<eq>Wires(1)<assert_stmt>decomp_ops[4].name<eq>"CZ"<block_end><def_stmt>test_nested_custom_decomp_with_template self<block_start>"""Test that specifying two custom decompositions that have interdependence works as expected even when
there is a template."""<def_stmt>circuit # -RX(0.1)-C- -> -RX(0.1)---C--- -> -RX(0.1)-----------------C---------------- # -RX(0.2)-X- -> -RX(0.2)-H-Z-H- -> -RX(0.2)-RZ(pi)-RY(pi/2)-Z-RY(pi/2)-RZ(pi)- <block_start>qml.BasicEntanglerLayers([[0.1 0.2]] wires=[0 1])<line_sep><return>qml.expval(qml.PauliZ(0))<block_end>custom_decomps={"Hadamard":custom_hadamard qml.CNOT:custom_cnot}<line_sep>decomp_dev=qml.device("default.qubit" wires=2 custom_decomps=custom_decomps)<line_sep>decomp_qnode=qml.QNode(circuit decomp_dev expansion_strategy="device")<line_sep>_=decomp_qnode()<line_sep>decomp_ops=decomp_qnode.qtape.operations<assert_stmt>len(decomp_ops)<eq>7<assert_stmt>decomp_ops[0].name<eq>"RX"<assert_stmt>decomp_ops[0].parameters[0]<eq>0.1<assert_stmt>decomp_ops[0].wires<eq>Wires(0)<assert_stmt>decomp_ops[1].name<eq>"RX"<assert_stmt>decomp_ops[1].parameters[0]<eq>0.2<assert_stmt>decomp_ops[1].wires<eq>Wires(1)<assert_stmt>decomp_ops[2].name<eq>"RZ"<assert_stmt>np.isclose(decomp_ops[2].parameters[0] np.pi)<assert_stmt>decomp_ops[2].wires<eq>Wires(1)<assert_stmt>decomp_ops[3].name<eq>"RY"<assert_stmt>np.isclose(decomp_ops[3].parameters[0] np.pi/2)<assert_stmt>decomp_ops[3].wires<eq>Wires(1)<assert_stmt>decomp_ops[4].name<eq>"CZ"<assert_stmt>decomp_ops[4].wires<eq>Wires([0 1])<assert_stmt>decomp_ops[5].name<eq>"RZ"<assert_stmt>np.isclose(decomp_ops[5].parameters[0] np.pi)<assert_stmt>decomp_ops[5].wires<eq>Wires(1)<assert_stmt>decomp_ops[6].name<eq>"RY"<assert_stmt>np.isclose(decomp_ops[6].parameters[0] np.pi/2)<assert_stmt>decomp_ops[6].wires<eq>Wires(1)<block_end><def_stmt>test_custom_decomp_template_to_template self<block_start>"""Test that decomposing a template into another template and some gates yields the correct results."""<def_stmt>circuit <block_start>qml.BasicEntanglerLayers([[0.1 0.2]] wires=[0 1])<line_sep><return>qml.expval(qml.PauliZ(0))<block_end># BasicEntanglerLayers custom decomposition involves AngleEmbedding custom_decomps={"BasicEntanglerLayers":custom_basic_entangler_layers "RX":custom_rx}<line_sep>decomp_dev=qml.device("default.qubit" wires=2 custom_decomps=custom_decomps)<line_sep>decomp_qnode=qml.QNode(circuit decomp_dev expansion_strategy="device")<line_sep>_=decomp_qnode()<line_sep>decomp_ops=decomp_qnode.qtape.operations<assert_stmt>len(decomp_ops)<eq>5<assert_stmt>decomp_ops[0].name<eq>"RY"<assert_stmt>decomp_ops[0].parameters[0]<eq>0.1<assert_stmt>decomp_ops[0].wires<eq>Wires(0)<assert_stmt>decomp_ops[1].name<eq>"Hadamard"<assert_stmt>decomp_ops[1].wires<eq>Wires(0)<assert_stmt>decomp_ops[2].name<eq>"RY"<assert_stmt>np.isclose(decomp_ops[2].parameters[0] 0.2)<assert_stmt>decomp_ops[2].wires<eq>Wires(1)<assert_stmt>decomp_ops[3].name<eq>"Hadamard"<assert_stmt>decomp_ops[3].wires<eq>Wires(1)<assert_stmt>decomp_ops[4].name<eq>"CNOT"<assert_stmt>decomp_ops[4].wires<eq>Wires([0 1])<block_end><def_stmt>test_custom_decomp_different_depth self<block_start>"""Test that alternative expansion depths can be specified."""<def_stmt>circuit <block_start>qml.BasicEntanglerLayers([[0.1 0.2]] wires=[0 1])<line_sep><return>qml.expval(qml.PauliZ(0))<block_end># BasicEntanglerLayers custom decomposition involves AngleEmbedding. If # expansion depth is 2, the AngleEmbedding will still be decomposed into # RX (since it's not a supported operation on the device), but the RX will # not be further decomposed even though the custom decomposition is specified. 
custom_decomps={"BasicEntanglerLayers":custom_basic_entangler_layers "RX":custom_rx}<line_sep>decomp_dev=qml.device("default.qubit" wires=2 custom_decomps=custom_decomps decomp_depth=2)<line_sep>decomp_qnode=qml.QNode(circuit decomp_dev expansion_strategy="device")<line_sep>_=decomp_qnode()<line_sep>decomp_ops=decomp_qnode.qtape.operations<assert_stmt>len(decomp_ops)<eq>3<assert_stmt>decomp_ops[0].name<eq>"RX"<assert_stmt>np.isclose(decomp_ops[0].parameters[0] 0.1)<assert_stmt>decomp_ops[0].wires<eq>Wires(0)<assert_stmt>decomp_ops[1].name<eq>"RX"<assert_stmt>np.isclose(decomp_ops[1].parameters[0] 0.2)<assert_stmt>decomp_ops[1].wires<eq>Wires(1)<assert_stmt>decomp_ops[2].name<eq>"CNOT"<assert_stmt>decomp_ops[2].wires<eq>Wires([0 1])<block_end><def_stmt>test_custom_decomp_with_adjoint self<block_start>"""Test that applying an adjoint in the circuit results in the adjoint undergoing the custom decomposition."""<def_stmt>circuit # Adjoint is RX(-0.2), so expect RY(-0.2) H <block_start>qml.adjoint(qml.RX)(0.2 wires="a")<line_sep><return>qml.expval(qml.PauliZ("a"))<block_end>custom_decomps={qml.RX:custom_rx}<line_sep>decomp_dev=qml.device("default.qubit" wires="a" custom_decomps=custom_decomps)<line_sep>decomp_qnode=qml.QNode(circuit decomp_dev expansion_strategy="device")<line_sep>_=decomp_qnode()<line_sep>decomp_ops=decomp_qnode.qtape.operations<assert_stmt>len(decomp_ops)<eq>2<assert_stmt>decomp_ops[0].name<eq>"RY"<assert_stmt>decomp_ops[0].parameters[0]<eq>-0.2<assert_stmt>decomp_ops[0].wires<eq>Wires("a")<assert_stmt>decomp_ops[1].name<eq>"Hadamard"<assert_stmt>decomp_ops[1].wires<eq>Wires("a")<block_end><def_stmt>test_custom_decomp_with_control self<block_start>"""Test that applying a controlled version of a gate results in the controlled version of a decomposition."""<def_stmt>circuit <block_start>qml.ctrl(qml.Hadamard control=0)(wires=1)<line_sep><return>qml.expval(qml.PauliZ(0))<block_end>custom_decomps={qml.Hadamard:custom_hadamard}<line_sep>decomp_dev=qml.device("default.qubit" wires=2 custom_decomps=custom_decomps)<line_sep>decomp_qnode=qml.QNode(circuit decomp_dev expansion_strategy="device")<line_sep>_=decomp_qnode()<line_sep>decomp_ops=decomp_qnode.qtape.operations<assert_stmt>len(decomp_ops)<eq>2<assert_stmt>decomp_ops[0].name<eq>"CRZ"<assert_stmt>np.isclose(decomp_ops[0].parameters[0] np.pi)<assert_stmt>decomp_ops[0].wires<eq>Wires([0 1])<assert_stmt>decomp_ops[1].name<eq>"CRY"<assert_stmt>np.isclose(decomp_ops[1].parameters[0] np.pi/2)<assert_stmt>decomp_ops[1].wires<eq>Wires([0 1])<block_end><def_stmt>test_custom_decomp_in_separate_context self<block_start>"""Test that the set_decomposition context manager works."""<line_sep>dev=qml.device("default.qubit" wires=2)<line_sep>@qml.qnode(dev expansion_strategy="device")<def_stmt>circuit <block_start>qml.CNOT(wires=[0 1])<line_sep><return>qml.expval(qml.PauliZ(wires=0))<block_end># Initial test _=circuit()<assert_stmt>len(circuit.qtape.operations)<eq>1<assert_stmt>circuit.qtape.operations[0].name<eq>"CNOT"<assert_stmt>dev.custom_expand_fn<is><none><line_sep># Test within the context manager <with_stmt>qml.transforms.set_decomposition({qml.CNOT:custom_cnot} dev)<block_start>_=circuit()<line_sep>ops_in_context=circuit.qtape.operations<assert_stmt>dev.custom_expand_fn<is><not><none><block_end><assert_stmt>len(ops_in_context)<eq>3<assert_stmt>ops_in_context[0].name<eq>"Hadamard"<assert_stmt>ops_in_context[1].name<eq>"CZ"<assert_stmt>ops_in_context[2].name<eq>"Hadamard"<line_sep># Check that afterwards, the device has gone back to 
normal _=circuit()<assert_stmt>len(circuit.qtape.operations)<eq>1<assert_stmt>circuit.qtape.operations[0].name<eq>"CNOT"<assert_stmt>dev.custom_expand_fn<is><none><block_end><def_stmt>test_custom_decomp_used_twice self<block_start>"""Test that applying the same custom decomposition twice correctly overwrites the device's expansion method under the hood each time and produces identical results."""<line_sep>res=[]<for_stmt>i range(2)<block_start>custom_decomps={"MultiRZ":qml.MultiRZ.compute_decomposition}<line_sep>dev=qml.device("lightning.qubit" wires=2 custom_decomps=custom_decomps)<line_sep>@qml.qnode(dev diff_method="adjoint")<def_stmt>cost theta<block_start>qml.Hadamard(wires=0)<line_sep>qml.Hadamard(wires=1)<line_sep>qml.MultiRZ(theta wires=[1 0])<line_sep><return>qml.expval(qml.PauliX(1))<block_end>x=np.array(0.5)<line_sep>res.append(cost(x))<block_end><assert_stmt>res[0]<eq>res[1]<block_end><block_end>
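The tests above pin down the two public entry points for custom decompositions: the custom_decomps device keyword and the qml.transforms.set_decomposition context manager. A minimal usage sketch distilled from those tests, written in ordinary Python (the custom_cnot helper is the one defined at the top of this file):

import pennylane as qml

def custom_cnot(wires):
    # CNOT decomposed as (I x H) CZ (I x H)
    return [qml.Hadamard(wires=wires[1]),
            qml.CZ(wires=[wires[0], wires[1]]),
            qml.Hadamard(wires=wires[1])]

# Permanent: bake the decomposition into the device itself
dev = qml.device("default.qubit", wires=2, custom_decomps={qml.CNOT: custom_cnot})

@qml.qnode(dev, expansion_strategy="device")
def circuit():
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(0))

circuit()  # tape now contains Hadamard, CZ, Hadamard instead of CNOT

# Temporary: apply the decomposition only inside the context manager
plain_dev = qml.device("default.qubit", wires=2)
with qml.transforms.set_decomposition({qml.CNOT: custom_cnot}, plain_dev):
    pass  # QNodes bound to plain_dev expand CNOT -> H, CZ, H in here only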
<import_stmt>json<import_stmt>gramex.cache<import_stmt>pandas<as>pd<import_from_stmt>. TestGramex<import_from_stmt>gramex.http FOUND<import_from_stmt>pandas.util.testing assert_frame_equal<as>afe<class_stmt>TestFunctionHandler(TestGramex)<block_start><def_stmt>test_args self<block_start>etag={'headers':{'Etag':<true>}}<line_sep>text='{"args": [0, 1], "kwargs": {"a": "a", "b": "b"}}'<line_sep>self.check('/func/args' text=text **etag)<line_sep>self.check('/func/args-split' text=text **etag)<line_sep>text='{"args": ["abc", 1], "kwargs": {"a": "abc", "b": 1}}'<line_sep>self.check('/func/args-variable' text=text **etag)<line_sep>self.check('/func/handler' text='{"args": ["Handler"], "kwargs": {}' **etag)<line_sep>self.check('/func/handler-null' text='{"args": [], "kwargs": {}' **etag)<line_sep>self.check('/func/composite' text='{"args": [0, "Handler"], "kwargs": {"a": "a", "handler": "Handler"}}' **etag)<line_sep>text='{"args": [0, "Handler"], "kwargs": {"a": {"b": 1}, "handler": "Handler"}}'<line_sep>self.check('/func/compositenested' text=text **etag)<line_sep>self.check('/func/compositenested-split' text=text **etag)<line_sep>self.check('/func/compositenested-variable' text=text **etag)<line_sep>self.check('/func/dumpx?x=1&x=2' text='{"args": [["1", "2"]], "kwargs": {}}' **etag)<block_end><def_stmt>test_async self<block_start>etag={'headers':{'Etag':<true>}}<line_sep>text='{"args": [0, 1], "kwargs": {"a": "a", "b": "b"}}'<line_sep>self.check('/func/async/args' text=text **etag)<line_sep>self.check('/func/async/args-split' text=text **etag)<line_sep>self.check('/func/async/http' text='{"args": [["1", "2"]], "kwargs": {}}' **etag)<line_sep>self.check('/func/async/http2' text='{"args": [["1"]], "kwargs": {}}{"args": [["2"]], "kwargs": {}}' **etag)<line_sep>self.check('/func/async/calc' text='[[250,250,250],[250,250,250],[250,250,250],[250,250,250]]' **etag)<block_end><def_stmt>test_json self<block_start>self.check('/func/numpytypes')<block_end><def_stmt>test_iterator self<block_start>no_etag={'headers':{'Etag':<false>}}<line_sep>self.check('/func/iterator?x=1&x=2&x=3' text='123' **no_etag)<line_sep>self.check('/func/iterator/async?x=1&x=2&x=3' text='123' **no_etag)<block_end><def_stmt>test_redirect self<block_start>r=self.get('/func/redirect' allow_redirects=<false>)<line_sep>self.assertEqual(r.headers.get('Location') '/dir/index/')<line_sep>self.assertEqual(r.headers.get('Increment') '1')<line_sep>r=self.get('/func/redirect?next=/abc' allow_redirects=<false>)<line_sep>self.assertEqual(r.headers.get('Location') '/abc')<line_sep>self.assertEqual(r.headers.get('Increment') '2')<line_sep>r=self.get('/func/redirect' headers={'NEXT':'/abc'} allow_redirects=<false>)<line_sep>self.assertEqual(r.headers.get('Location') '/abc')<line_sep>self.assertEqual(r.headers.get('Increment') '3')<line_sep>r=self.get('/func/redirect?next=/def' headers={'NEXT':'/abc'} allow_redirects=<false>)<line_sep>self.assertEqual(r.headers.get('Location') '/def')<line_sep>self.assertEqual(r.headers.get('Increment') '4')<block_end><def_stmt>test_path_args self<block_start>self.check('/func/path_args/高/兴' text='["\\u9ad8", "\\u5174"]')<block_end><def_stmt>test_methods self<block_start>self.check('/func/methods' method='get' code=405)<line_sep>self.check('/func/methods' method='delete' code=405)<for_stmt>method ['post' 'put']<block_start>r=self.get('/func/methods' method=method headers={'NEXT':'/abc'} allow_redirects=<false>)<line_sep>self.assertEqual(r.status_code FOUND)<line_sep>self.assertEqual(r.headers.get('Location') 
'/abc')<block_end><block_end><block_end><class_stmt>TestWrapper(TestGramex)<block_start><def_stmt>test_config_kwargs self<block_start>self.check('/func/power?y=3' text='9.0')<line_sep>self.check('/func/power?y=3&x=3' text='27.0')<block_end><def_stmt>test_yielder self<block_start>self.check('/func/yielder?i=a&i=b&i=c' text='abc')<block_end><def_stmt>test_add_handler_get self<block_start>self.check('/func/total/40/2' text='42.0')<line_sep>self.check('/func/total/40/2?items=10' text='52.0')<line_sep>self.check('/func/total/40/2?items=10&items=10' text='62.0')<line_sep>self.check('/func/name_age/johndoe/age/42' text='johndoe is 42 years old.')<line_sep>self.check('/func/name_age' text='alpha is 10 years old.')<line_sep>self.check('/func/name_age?name=johndoe&age=42' text='johndoe is 42 years old.')<line_sep># In case of multiple kwargs, the last parameter is picked self.check('/func/name_age?name=x&name=y&age=1&age=2' text='y is 2 years old.')<line_sep># When type hints are violated: self.check('/func/hints?name=johndoe&age=42.3' code=500)<line_sep># When multiple arguments are passed: self.check('/func/total?items=1&items=2&items=3' text='6.0')<line_sep>self.check('/func/multilist?items=1&items=2&items=3&start=1' text='7.0')<line_sep># Positional args with types self.check('/func/strtotal?items=a&items=b&items=c' text='abc')<line_sep># Test native types. Note: "i=false" won't work -- use "i=" since it's a np.bool8 # Note: datetimes must be quoted, since they'll be read as JSON usually. self.check('/func/nativetypes?a=3&b=1.5&c=false&d=d&e=null&f=3&g=1.5&h=h&i=' text=''.join(['3' '1.5' 'false' 'd' '' '3' '1.5' 'h' 'false' '"2020-01-01T00:00:00+00:00"' '{"a":3,"b":1.5}' '[3,1.5]']))<line_sep>self.check('/func/greet' text='Hello, Stranger!')<line_sep>self.check('/func/greet?name=gramex' text='Hello, gramex!')<line_sep>self.check('/func/multilist?items=1&items=2&items=3&start=1' text='7.0')<line_sep>sales=self.check('/func/sales').json()<line_sep>afe(pd.DataFrame(sales) gramex.cache.open('sales.xlsx' rel=<true>))<line_sep>self.check('/func/content/003.json' text='{"x":3}' headers={'Content-Type':'application/json'})<line_sep>self.check('/func/content/003.txt' text='x=3' headers={'Content-Type':'text/plain'})<block_end><def_stmt>test_add_handler_post self<block_start>self.check('/func/name_age' method='post' data={'name':'johndoe' 'age':'42'} text='johndoe is 42 years old.')<line_sep>self.check('/func/name_age' method='post' data=json.dumps({'name':'johndoe' 'age':'42'}) request_headers={'Content-Type':'application/json'} text='johndoe is 42 years old.')<line_sep># When type hints are violated: self.check('/func/hints' method='post' data={'name':'johndoe' 'age':'42.3'} code=500)<line_sep># Check typecasting self.check('/func/nativetypes' method='post' data=json.dumps({'a':3 'b':1.5 'c':<false> 'd':'d' 'e':<none> 'f':3 'g':1.5 'h':'h' 'i':<false>}) request_headers={'Content-Type':'application/json'} text=''.join(['3' '1.5' 'false' 'd' '' '3' '1.5' 'h' 'false' '"2020-01-01T00:00:00+00:00"' '{"a":3,"b":1.5}' '[3,1.5]']))<line_sep>self.check('/func/greet' text='Hello, Stranger!')<line_sep># Check if POSTing url params and path args works self.check('/func/name_age?name=johndoe&age=42' method='post' text='johndoe is 42 years old.')<line_sep>self.check('/func/name_age/johndoe/age/42' text='johndoe is 42 years old.')<block_end><def_stmt>test_add_handler_delete self<block_start>self.check('/func/total/40/2?items=10&items=20' text='72.0' method='delete')<block_end><block_end>
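These add_handler checks all rely on FunctionHandler binding URL query parameters and path arguments onto the wrapped function's keyword arguments. The test app's actual functions are not part of this file, so the following is a hypothetical reconstruction of the name_age endpoint, consistent with the responses asserted above:

def name_age(name='alpha', age=10):
    # /func/name_age                      -> "alpha is 10 years old."
    # /func/name_age?name=johndoe&age=42  -> "johndoe is 42 years old."
    # /func/name_age/johndoe/age/42       -> "johndoe is 42 years old."
    # With repeated parameters, the last value wins (name=x&name=y -> y).
    return '{} is {} years old.'.format(name, age)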
""" Various tensorflow utilities """<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.contrib.framework.python.ops add_arg_scope<import_from_stmt>tensorflow.python.ops variables<import_stmt>functools<def_stmt>passthrough obj value<block_start><return>value<block_end><try_stmt><block_start>variables.Variable._build_initializer_expr=passthrough<block_end><except_stmt># older versions of TF don't have this <block_start><pass><block_end><def_stmt>int_shape x<block_start><return>list(map(int x.get_shape()))<block_end><def_stmt>concat_elu x<block_start>""" like concatenated ReLU (http://arxiv.org/abs/1603.05201), but then with ELU """<line_sep>axis=len(x.get_shape())-1<line_sep><return>tf.nn.elu(tf.concat([x -x] axis))<block_end><def_stmt>log_sum_exp x<block_start>""" numerically stable log_sum_exp implementation that prevents overflow """<line_sep>axis=len(x.get_shape())-1<line_sep>m=tf.reduce_max(x axis)<line_sep>m2=tf.reduce_max(x axis keep_dims=<true>)<line_sep><return>m+tf.log(tf.reduce_sum(tf.exp(x-m2) axis))<block_end><def_stmt>log_prob_from_logits x<block_start>""" numerically stable log_softmax implementation that prevents overflow """<line_sep>axis=len(x.get_shape())-1<line_sep>m=tf.reduce_max(x axis keep_dims=<true>)<line_sep><return>x-m-tf.log(tf.reduce_sum(tf.exp(x-m) axis keep_dims=<true>))<block_end><def_stmt>discretized_mix_logistic_loss x l sum_all=<true><block_start>""" log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval """<line_sep>xs=int_shape(x)<line_sep># true image (i.e. labels) to regress to, e.g. (B,32,32,3) ls=int_shape(l)# predicted distribution, e.g. (B,32,32,100) # here and below: unpacking the params of the mixture of logistics nr_mix=int(ls[-1]/10)<line_sep>logit_probs=l[: : : :nr_mix]<line_sep>l=tf.reshape(l[: : : nr_mix:] xs+[nr_mix<times>3])<line_sep>means=l[: : : : :nr_mix]<line_sep>log_scales=tf.maximum(l[: : : : nr_mix:2<times>nr_mix] -7.)<line_sep>coeffs=tf.nn.tanh(l[: : : : 2<times>nr_mix:3<times>nr_mix])<line_sep># here and below: getting the means and adjusting them based on preceding # sub-pixels x=tf.reshape(x xs+[1])+tf.zeros(xs+[nr_mix])<line_sep>m2=tf.reshape(means[: : : 1 :]+coeffs[: : : 0 :]<times>x[: : : 0 :] [xs[0] xs[1] xs[2] 1 nr_mix])<line_sep>m3=tf.reshape(means[: : : 2 :]+coeffs[: : : 1 :]<times>x[: : : 0 :]+coeffs[: : : 2 :]<times>x[: : : 1 :] [xs[0] xs[1] xs[2] 1 nr_mix])<line_sep>means=tf.concat([tf.reshape(means[: : : 0 :] [xs[0] xs[1] xs[2] 1 nr_mix]) m2 m3] 3)<line_sep>centered_x=x-means<line_sep>inv_stdv=tf.exp(-log_scales)<line_sep>plus_in=inv_stdv<times>(centered_x+1./255.)<line_sep>cdf_plus=tf.nn.sigmoid(plus_in)<line_sep>min_in=inv_stdv<times>(centered_x-1./255.)<line_sep>cdf_min=tf.nn.sigmoid(min_in)<line_sep># log probability for edge case of 0 (before scaling) log_cdf_plus=plus_in-tf.nn.softplus(plus_in)<line_sep># log probability for edge case of 255 (before scaling) log_one_minus_cdf_min=-tf.nn.softplus(min_in)<line_sep>cdf_delta=cdf_plus-cdf_min# probability for all other cases mid_in=inv_stdv<times>centered_x<line_sep># log probability in the center of the bin, to be used in extreme cases # (not actually used in our code) log_pdf_mid=mid_in-log_scales-2.<times>tf.nn.softplus(mid_in)<line_sep># now select the right output: left edge case, right edge case, normal # case, extremely low prob case (doesn't actually happen for us) # this is what we are really doing, but using the robust version below for extreme cases in other applications and 
to avoid NaN issue with tf.select() # log_probs = tf.select(x < -0.999, log_cdf_plus, tf.select(x > 0.999, log_one_minus_cdf_min, tf.log(cdf_delta))) # robust version, that still works if probabilities are below 1e-5 (which never happens in our code) # tensorflow backpropagates through tf.select() by multiplying with zero instead of selecting: this requires us to use some ugly tricks to avoid potential NaNs # the 1e-12 in tf.maximum(cdf_delta, 1e-12) is never actually used as output, it's purely there to get around the tf.select() gradient issue # if the probability on a sub-pixel is below 1e-5, we use an approximation # based on the assumption that the log-density is constant in the bin of # the observed sub-pixel value log_probs=tf.where(x<l>-0.999 log_cdf_plus tf.where(x<g>0.999 log_one_minus_cdf_min tf.where(cdf_delta<g>1e-5 tf.log(tf.maximum(cdf_delta 1e-12)) log_pdf_mid-np.log(127.5))))<line_sep>log_probs=tf.reduce_sum(log_probs 3)+log_prob_from_logits(logit_probs)<if_stmt>sum_all<block_start><return>-tf.reduce_sum(log_sum_exp(log_probs))<block_end><else_stmt><block_start><return>-tf.reduce_sum(log_sum_exp(log_probs) [1 2])<block_end><block_end><def_stmt>discretized_mix_logistic_loss_per_chn x lr lg lb sum_all=<true><block_start>""" log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval """<line_sep>xs=int_shape(x)# true image (i.e. labels) to regress to, e.g. (B,32,32,3) ls=int_shape(lr)# predicted distribution, e.g. (B,32,32,100) # here and below: unpacking the params of the mixture of logistics nr_mix=int(ls[-1]/3)<line_sep>logit_probs=lr[: : : :nr_mix]<line_sep>means=tf.concat([lr[: : : <none> nr_mix:nr_mix<times>2] lg[: : : <none> nr_mix:nr_mix<times>2] lb[: : : <none> nr_mix:nr_mix<times>2] ] axis=-2)<line_sep>log_scales=tf.concat([lr[: : : <none> nr_mix<times>2:nr_mix<times>3] lg[: : : <none> nr_mix<times>2:nr_mix<times>3] lb[: : : <none> nr_mix<times>2:nr_mix<times>3] ] axis=-2)<line_sep>log_scales=tf.maximum(log_scales -7.)<line_sep>x=tf.reshape(x xs+[1])+tf.zeros(xs+[nr_mix])<line_sep>centered_x=x-means<line_sep>inv_stdv=tf.exp(-log_scales)<line_sep>plus_in=inv_stdv<times>(centered_x+1./255.)<line_sep>cdf_plus=tf.nn.sigmoid(plus_in)<line_sep>min_in=inv_stdv<times>(centered_x-1./255.)<line_sep>cdf_min=tf.nn.sigmoid(min_in)<line_sep># log probability for edge case of 0 (before scaling) log_cdf_plus=plus_in-tf.nn.softplus(plus_in)<line_sep># log probability for edge case of 255 (before scaling) log_one_minus_cdf_min=-tf.nn.softplus(min_in)<line_sep>cdf_delta=cdf_plus-cdf_min# probability for all other cases mid_in=inv_stdv<times>centered_x<line_sep># log probability in the center of the bin, to be used in extreme cases # (not actually used in our code) log_pdf_mid=mid_in-log_scales-2.<times>tf.nn.softplus(mid_in)<line_sep># now select the right output: left edge case, right edge case, normal # case, extremely low prob case (doesn't actually happen for us) # this is what we are really doing, but using the robust version below for extreme cases in other applications and to avoid NaN issue with tf.select() # log_probs = tf.select(x < -0.999, log_cdf_plus, tf.select(x > 0.999, log_one_minus_cdf_min, tf.log(cdf_delta))) # robust version, that still works if probabilities are below 1e-5 (which never happens in our code) # tensorflow backpropagates through tf.select() by multiplying with zero instead of selecting: this requires us to use some ugly tricks to avoid potential NaNs # the 1e-12 in tf.maximum(cdf_delta, 1e-12) is never
actually used as output, it's purely there to get around the tf.select() gradient issue # if the probability on a sub-pixel is below 1e-5, we use an approximation # based on the assumption that the log-density is constant in the bin of # the observed sub-pixel value log_probs=tf.where(x<l>-0.999 log_cdf_plus tf.where(x<g>0.999 log_one_minus_cdf_min tf.where(cdf_delta<g>1e-5 tf.log(tf.maximum(cdf_delta 1e-12)) log_pdf_mid-np.log(127.5))))<line_sep>log_probs=tf.reduce_sum(log_probs 3)+log_prob_from_logits(logit_probs)<if_stmt>sum_all<block_start><return>-tf.reduce_sum(log_sum_exp(log_probs))<block_end><else_stmt><block_start><return>-tf.reduce_sum(log_sum_exp(log_probs) [1 2])<block_end><block_end><def_stmt>sample_from_discretized_mix_logistic l nr_mix<block_start>ls=int_shape(l)<line_sep>xs=ls[:-1]+[3]<line_sep># unpack parameters logit_probs=l[: : : :nr_mix]<line_sep>l=tf.reshape(l[: : : nr_mix:] xs+[nr_mix<times>3])<line_sep># sample mixture indicator from softmax sel=tf.one_hot(tf.argmax(logit_probs-tf.log(-tf.log(tf.random_uniform(logit_probs.get_shape() minval=1e-5 maxval=1.-1e-5))) 3) depth=nr_mix dtype=tf.float32)<line_sep>sel=tf.reshape(sel xs[:-1]+[1 nr_mix])<line_sep># select logistic parameters means=tf.reduce_sum(l[: : : : :nr_mix]<times>sel 4)<line_sep>log_scales=tf.maximum(tf.reduce_sum(l[: : : : nr_mix:2<times>nr_mix]<times>sel 4) -7.)<line_sep>coeffs=tf.reduce_sum(tf.nn.tanh(l[: : : : 2<times>nr_mix:3<times>nr_mix])<times>sel 4)<line_sep># sample from logistic & clip to interval # we don't actually round to the nearest 8bit value when sampling u=tf.random_uniform(means.get_shape() minval=1e-5 maxval=1.-1e-5)<line_sep>x=means+tf.exp(log_scales)<times>(tf.log(u)-tf.log(1.-u))<line_sep>x0=tf.minimum(tf.maximum(x[: : : 0] -1.) 1.)<line_sep>x1=tf.minimum(tf.maximum(x[: : : 1]+coeffs[: : : 0]<times>x0 -1.) 1.)<line_sep>x2=tf.minimum(tf.maximum(x[: : : 2]+coeffs[: : : 1]<times>x0+coeffs[: : : 2]<times>x1 -1.) 1.)<line_sep><return>tf.concat([tf.reshape(x0 xs[:-1]+[1]) tf.reshape(x1 xs[:-1]+[1]) tf.reshape(x2 xs[:-1]+[1])] 3)<block_end><def_stmt>get_var_maybe_avg var_name ema **kwargs<block_start>''' utility for retrieving polyak averaged params '''<line_sep>v=tf.get_variable(var_name **kwargs)<if_stmt>ema<is><not><none><block_start>v=ema.average(v)<block_end><return>v<block_end><def_stmt>get_vars_maybe_avg var_names ema **kwargs<block_start>''' utility for retrieving polyak averaged params '''<line_sep>vars=[]<for_stmt>vn var_names<block_start>vars.append(get_var_maybe_avg(vn ema **kwargs))<block_end><return>vars<block_end><def_stmt>adam_updates params cost_or_grads lr=0.001 mom1=0.9 mom2=0.999 eps=1e-8<block_start>''' Adam optimizer '''<line_sep>updates=[]<if_stmt>type(cost_or_grads)<is><not>list<block_start>grads=tf.gradients(cost_or_grads params)<block_end><else_stmt><block_start>grads=cost_or_grads<block_end>t=tf.Variable(1. 
'adam_t')<for_stmt>p,g zip(params grads)<block_start>mg=tf.Variable(tf.zeros(p.get_shape()) p.name+'_adam_mg')<if_stmt>mom1<g>0<block_start>v=tf.Variable(tf.zeros(p.get_shape()) p.name+'_adam_v')<line_sep>v_t=mom1<times>v+(1.-mom1)<times>g<line_sep>v_hat=v_t/(1.-tf.pow(mom1 t))<line_sep>updates.append(v.assign(v_t))<block_end><else_stmt><block_start>v_hat=g<block_end>mg_t=mom2<times>mg+(1.-mom2)<times>tf.square(g)<line_sep>mg_hat=mg_t/(1.-tf.pow(mom2 t))<line_sep>g_t=v_hat/tf.sqrt(mg_hat+eps)<line_sep>p_t=p-lr<times>g_t<line_sep>updates.append(mg.assign(mg_t))<line_sep>updates.append(p.assign(p_t))<block_end>updates.append(t.assign_add(1))<line_sep><return>tf.group(*updates)<block_end><def_stmt>get_name layer_name counters<block_start>''' utility for keeping track of layer names '''<if_stmt><not>layer_name<in>counters<block_start>counters[layer_name]=0<block_end>name=layer_name+'_'+str(counters[layer_name])<line_sep>counters[layer_name]<augadd>1<line_sep><return>name<block_end>@add_arg_scope<def_stmt>dense x num_units nonlinearity=<none> init_scale=1. counters={} init=<false> ema=<none> **kwargs<block_start>''' fully connected layer '''<line_sep>name=get_name('dense' counters)<with_stmt>tf.variable_scope(name)<block_start><if_stmt>init# data based initialization of parameters <block_start>V=tf.get_variable('V' [int(x.get_shape()[1]) num_units] tf.float32 tf.random_normal_initializer(0 0.05) trainable=<true>)<line_sep>V_norm=tf.nn.l2_normalize(V.initialized_value() [0])<line_sep>x_init=tf.matmul(x V_norm)<line_sep>m_init,v_init=tf.nn.moments(x_init [0])<line_sep>scale_init=init_scale/tf.sqrt(v_init+1e-10)<line_sep>g=tf.get_variable('g' dtype=tf.float32 initializer=scale_init trainable=<true>)<line_sep>b=tf.get_variable('b' dtype=tf.float32 initializer=-m_init<times>scale_init trainable=<true>)<line_sep>x_init=tf.reshape(scale_init [1 num_units])<times>(x_init-tf.reshape(m_init [1 num_units]))<if_stmt>nonlinearity<is><not><none><block_start>x_init=nonlinearity(x_init)<block_end><return>x_init<block_end><else_stmt><block_start>V,g,b=get_vars_maybe_avg(['V' 'g' 'b'] ema)<line_sep># tf.assert_variables_initialized([V, g, b]) # use weight normalization (Salimans & Kingma, 2016) x=tf.matmul(x V)<line_sep>scaler=g/tf.sqrt(tf.reduce_sum(tf.square(V) [0]))<line_sep>x=tf.reshape(scaler [1 num_units])<times>x+tf.reshape(b [1 num_units])<line_sep># apply nonlinearity <if_stmt>nonlinearity<is><not><none><block_start>x=nonlinearity(x)<block_end><return>x<block_end><block_end><block_end>@add_arg_scope<def_stmt>conv2d x num_filters filter_size=[3 3] stride=[1 1] pad='SAME' nonlinearity=<none> init_scale=1. 
counters={} init=<false> ema=<none> **kwargs<block_start>''' convolutional layer '''<line_sep>name=get_name('conv2d' counters)<with_stmt>tf.variable_scope(name)<block_start><if_stmt>init# data based initialization of parameters <block_start>V=tf.get_variable('V' filter_size+[int(x.get_shape()[-1]) num_filters] tf.float32 tf.random_normal_initializer(0 0.05) trainable=<true>)<line_sep>V_norm=tf.nn.l2_normalize(V.initialized_value() [0 1 2])<line_sep>x_init=tf.nn.conv2d(x V_norm [1]+stride+[1] pad)<line_sep>m_init,v_init=tf.nn.moments(x_init [0 1 2])<line_sep>scale_init=init_scale/tf.sqrt(v_init+1e-8)<line_sep>g=tf.get_variable('g' dtype=tf.float32 initializer=scale_init trainable=<true>)<line_sep>b=tf.get_variable('b' dtype=tf.float32 initializer=-m_init<times>scale_init trainable=<true>)<line_sep>x_init=tf.reshape(scale_init [1 1 1 num_filters])<times>(x_init-tf.reshape(m_init [1 1 1 num_filters]))<if_stmt>nonlinearity<is><not><none><block_start>x_init=nonlinearity(x_init)<block_end><return>x_init<block_end><else_stmt><block_start>V,g,b=get_vars_maybe_avg(['V' 'g' 'b'] ema)<line_sep># tf.assert_variables_initialized([V, g, b]) # use weight normalization (Salimans & Kingma, 2016) W=tf.reshape(g [1 1 1 num_filters])<times>tf.nn.l2_normalize(V [0 1 2])<line_sep># calculate convolutional layer output x=tf.nn.bias_add(tf.nn.conv2d(x W [1]+stride+[1] pad) b)<line_sep># apply nonlinearity <if_stmt>nonlinearity<is><not><none><block_start>x=nonlinearity(x)<block_end><return>x<block_end><block_end><block_end>@add_arg_scope<def_stmt>deconv2d x num_filters filter_size=[3 3] stride=[1 1] pad='SAME' nonlinearity=<none> init_scale=1. counters={} init=<false> ema=<none> **kwargs<block_start>''' transposed convolutional layer '''<line_sep>name=get_name('deconv2d' counters)<line_sep>xs=int_shape(x)<if_stmt>pad<eq>'SAME'<block_start>target_shape=[xs[0] xs[1]<times>stride[0] xs[2]<times>stride[1] num_filters]<block_end><else_stmt><block_start>target_shape=[xs[0] xs[1]<times>stride[0]+filter_size[0]-1 xs[2]<times>stride[1]+filter_size[1]-1 num_filters]<block_end><with_stmt>tf.variable_scope(name)<block_start><if_stmt>init# data based initialization of parameters <block_start>V=tf.get_variable('V' filter_size+[num_filters int(x.get_shape()[-1])] tf.float32 tf.random_normal_initializer(0 0.05) trainable=<true>)<line_sep>V_norm=tf.nn.l2_normalize(V.initialized_value() [0 1 3])<line_sep>x_init=tf.nn.conv2d_transpose(x V_norm target_shape [1]+stride+[1] padding=pad)<line_sep>m_init,v_init=tf.nn.moments(x_init [0 1 2])<line_sep>scale_init=init_scale/tf.sqrt(v_init+1e-8)<line_sep>g=tf.get_variable('g' dtype=tf.float32 initializer=scale_init trainable=<true>)<line_sep>b=tf.get_variable('b' dtype=tf.float32 initializer=-m_init<times>scale_init trainable=<true>)<line_sep>x_init=tf.reshape(scale_init [1 1 1 num_filters])<times>(x_init-tf.reshape(m_init [1 1 1 num_filters]))<if_stmt>nonlinearity<is><not><none><block_start>x_init=nonlinearity(x_init)<block_end><return>x_init<block_end><else_stmt><block_start>V,g,b=get_vars_maybe_avg(['V' 'g' 'b'] ema)<line_sep># tf.assert_variables_initialized([V, g, b]) # use weight normalization (Salimans & Kingma, 2016) W=tf.reshape(g [1 1 num_filters 1])<times>tf.nn.l2_normalize(V [0 1 3])<line_sep># calculate convolutional layer output x=tf.nn.conv2d_transpose(x W target_shape [1]+stride+[1] padding=pad)<line_sep>x=tf.nn.bias_add(x b)<line_sep># apply nonlinearity 
<if_stmt>nonlinearity<is><not><none><block_start>x=nonlinearity(x)<block_end><return>x<block_end><block_end><block_end>@add_arg_scope<def_stmt>nin x num_units **kwargs<block_start>""" a network in network layer (1x1 CONV) """<line_sep>s=int_shape(x)<line_sep>x=tf.reshape(x [np.prod(s[:-1]) s[-1]])<line_sep>x=dense(x num_units **kwargs)<line_sep><return>tf.reshape(x s[:-1]+[num_units])<block_end>''' meta-layer consisting of multiple base layers '''<line_sep>@add_arg_scope<def_stmt>gated_resnet x a=<none> h=<none> nonlinearity=concat_elu conv=conv2d init=<false> counters={} ema=<none> dropout_p=0. **kwargs<block_start>xs=int_shape(x)<line_sep>num_filters=xs[-1]<line_sep>c1=conv(nonlinearity(x) num_filters)<if_stmt>a<is><not><none># add short-cut connection if auxiliary input 'a' is given <block_start>c1<augadd>nin(nonlinearity(a) num_filters)<block_end>c1=nonlinearity(c1)<if_stmt>dropout_p<g>0<block_start>c1=tf.nn.dropout(c1 keep_prob=1.-dropout_p)<block_end>c2=conv(c1 num_filters<times>2 init_scale=0.1)<line_sep># add projection of h vector if included: conditional generation <if_stmt>h<is><not><none><block_start><with_stmt>tf.variable_scope(get_name('conditional_weights' counters))<block_start>hw=get_var_maybe_avg('hw' ema shape=[int_shape(h)[-1] 2<times>num_filters] dtype=tf.float32 initializer=tf.random_normal_initializer(0 0.05) trainable=<true>)<block_end><if_stmt>init<block_start>hw=hw.initialized_value()<block_end>c2<augadd>tf.reshape(tf.matmul(h hw) [xs[0] 1 1 2<times>num_filters])<block_end># tf.split(value, num_or_size_splits, axis): split the 2*num_filters channels into two halves along axis 3 a,b=tf.split(c2 2 3)<line_sep>c3=a<times>tf.nn.sigmoid(b)<line_sep><return>x+c3<block_end>''' utilities for shifting the image around, efficient alternative to masking convolutions '''<def_stmt>down_shift x step=1<block_start>xs=int_shape(x)<line_sep><return>tf.concat([tf.zeros([xs[0] step xs[2] xs[3]]) x[: :xs[1]-step : :]] 1)<block_end><def_stmt>right_shift x step=1<block_start>xs=int_shape(x)<line_sep><return>tf.concat([tf.zeros([xs[0] xs[1] step xs[3]]) x[: : :xs[2]-step :]] 2)<block_end><def_stmt>left_shift x step=1<block_start>xs=int_shape(x)<line_sep><return>tf.concat([x[: : step: :] tf.zeros([xs[0] xs[1] step xs[3]]) ] 2)<block_end>@add_arg_scope<def_stmt>down_shifted_conv2d x num_filters filter_size=[2 3] stride=[1 1] **kwargs<block_start>x=tf.pad(x [[0 0] [filter_size[0]-1 0] [int((filter_size[1]-1)/2) int((filter_size[1]-1)/2)] [0 0]])<line_sep><return>conv2d(x num_filters filter_size=filter_size pad='VALID' stride=stride **kwargs)<block_end>@add_arg_scope<def_stmt>down_shifted_deconv2d x num_filters filter_size=[2 3] stride=[1 1] **kwargs<block_start>x=deconv2d(x num_filters filter_size=filter_size pad='VALID' stride=stride **kwargs)<line_sep>xs=int_shape(x)<line_sep><return>x[: :(xs[1]-filter_size[0]+1) int((filter_size[1]-1)/2):(xs[2]-int((filter_size[1]-1)/2)) :]<block_end>@add_arg_scope<def_stmt>down_right_shifted_conv2d x num_filters filter_size=[2 2] stride=[1 1] **kwargs<block_start>x=tf.pad(x [[0 0] [filter_size[0]-1 0] [filter_size[1]-1 0] [0 0]])<line_sep><return>conv2d(x num_filters filter_size=filter_size pad='VALID' stride=stride **kwargs)<block_end>@add_arg_scope<def_stmt>down_right_shifted_deconv2d x num_filters filter_size=[2 2] stride=[1 1] **kwargs<block_start>x=deconv2d(x num_filters filter_size=filter_size pad='VALID' stride=stride **kwargs)<line_sep>xs=int_shape(x)<line_sep><return>x[: :(xs[1]-filter_size[0]+1): :(xs[2]-filter_size[1]+1) :]<block_end><def_stmt>causal_shift_nin x num_filters 
**kwargs<block_start>chns=int_shape(x)[-1]<assert_stmt>chns%4<eq>0<line_sep>left,upleft,up,upright=tf.split(x 4 axis=-1)<line_sep><return>nin(tf.concat([right_shift(left) right_shift(down_shift(upleft)) down_shift(up) down_shift(left_shift(upleft))] axis=-1) num_filters **kwargs)<block_end><import_from_stmt>tensorflow.python.framework function<line_sep>@add_arg_scope<def_stmt>mem_saving_causal_shift_nin x num_filters init counters **kwargs<block_start><if_stmt>init<block_start><return>causal_shift_nin(x num_filters init=init counters=counters **kwargs)<block_end>shps=int_shape(x)<line_sep>@function.Defun(tf.float32)<def_stmt>go ix<block_start>tf.get_variable_scope().reuse_variables()<line_sep>ix.set_shape(shps)<line_sep><return>causal_shift_nin(ix num_filters init=init counters=counters **kwargs)<block_end>temp=go(x)<line_sep>temp.set_shape([shps[0] shps[1] shps[2] num_filters])<line_sep><return>temp<block_end><import_stmt>functools<line_sep>@functools.lru_cache(maxsize=32)<def_stmt>get_causal_mask canvas_size rate=1<block_start>causal_mask=np.zeros([canvas_size canvas_size] dtype=np.float32)<for_stmt>i range(canvas_size)<block_start>causal_mask[i :i]=1.<block_end>causal_mask=tf.constant(causal_mask dtype=tf.float32)<if_stmt>rate<g>1<block_start>dim=int(np.sqrt(canvas_size))<line_sep>causal_mask=tf.reshape(causal_mask [canvas_size dim dim 1])<line_sep>causal_mask=-tf.nn.max_pool(-causal_mask [1 rate rate 1] [1 rate rate 1] 'SAME')<block_end>causal_mask=tf.reshape(causal_mask [1 canvas_size -1])<line_sep><return>causal_mask<block_end><def_stmt>causal_attention key mixin query downsample=1 use_pos_enc=<false><block_start>bs,nr_chns=int_shape(key)[0] int_shape(key)[-1]<if_stmt>downsample<g>1<block_start>pool_shape=[1 downsample downsample 1]<line_sep>key=tf.nn.max_pool(key pool_shape pool_shape 'SAME')<line_sep>mixin=tf.nn.max_pool(mixin pool_shape pool_shape 'SAME')<block_end>xs=int_shape(mixin)<if_stmt>use_pos_enc<block_start>pos1=tf.range(0. xs[1])/xs[1]<line_sep>pos2=tf.range(0. 
xs[2])/xs[1]<line_sep>mixin=tf.concat([mixin tf.tile(pos1[<none> : <none> <none>] [xs[0] 1 xs[2] 1]) tf.tile(pos2[<none> <none> : <none>] [xs[0] xs[2] 1 1]) ] axis=3)<block_end>mixin_chns=int_shape(mixin)[-1]<line_sep>canvas_size=int(np.prod(int_shape(key)[1:-1]))<line_sep>canvas_size_q=int(np.prod(int_shape(query)[1:-1]))<line_sep>causal_mask=get_causal_mask(canvas_size_q downsample)<line_sep>dot=tf.matmul(tf.reshape(query [bs canvas_size_q nr_chns]) tf.reshape(key [bs canvas_size nr_chns]) transpose_b=<true>)-(1.-causal_mask)<times>1e10<line_sep>dot=dot-tf.reduce_max(dot axis=-1 keep_dims=<true>)<line_sep>causal_exp_dot=tf.exp(dot/np.sqrt(nr_chns).astype(np.float32))<times>causal_mask<line_sep>causal_probs=causal_exp_dot/(tf.reduce_sum(causal_exp_dot axis=-1 keep_dims=<true>)+1e-6)<line_sep>mixed=tf.matmul(causal_probs tf.reshape(mixin [bs canvas_size mixin_chns]))<line_sep><return>tf.reshape(mixed int_shape(query)[:-1]+[mixin_chns])<block_end><def_stmt>non_cached_get_causal_mask canvas_size causal_unit<block_start><assert_stmt>causal_unit<eq>1<line_sep>ones=tf.ones([canvas_size canvas_size] dtype=tf.float32)<line_sep>lt=tf.matrix_band_part(ones -1 0)-tf.matrix_diag(tf.ones([canvas_size ] dtype=tf.float32))<line_sep><return>lt[<none> <ellipsis>]<block_end><def_stmt>mem_saving_causal_attention _key _mixin _query causal_unit=1# @function.Defun(tf.float32, tf.float32, tf.float32) <block_start><def_stmt>go key mixin query <block_start>key.set_shape(int_shape(_key))<line_sep>mixin.set_shape(int_shape(_mixin))<line_sep>query.set_shape(int_shape(_query))<line_sep>bs,nr_chns=int_shape(key)[0] int_shape(key)[-1]<line_sep>mixin_chns=int_shape(mixin)[-1]<line_sep>canvas_size=int(np.prod(int_shape(key)[1:-1]))<line_sep>causal_mask=non_cached_get_causal_mask(canvas_size causal_unit=causal_unit)<line_sep>dot=tf.matmul(tf.reshape(query [bs canvas_size nr_chns]) tf.reshape(key [bs canvas_size nr_chns]) transpose_b=<true>)-(1.-causal_mask)<times>1e10<line_sep>dot=dot-tf.reduce_max(dot axis=-1 keep_dims=<true>)<line_sep>causal_exp_dot=tf.exp(dot/np.sqrt(nr_chns).astype(np.float32))<times>causal_mask<line_sep>causal_probs=causal_exp_dot/(tf.reduce_sum(causal_exp_dot axis=-1 keep_dims=<true>)+1e-6)<line_sep>mixed=tf.matmul(causal_probs tf.reshape(mixin [bs canvas_size mixin_chns]))<line_sep><return>tf.reshape(mixed int_shape(mixin))<block_end>temp=go(_key _mixin _query)<line_sep>temp.set_shape(int_shape(_mixin))<line_sep><return>temp<block_end>
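Both attention helpers above hinge on a strictly lower-triangular mask: in raster-scan order, position i may only attend to positions j < i. A small NumPy sketch of the rate-1 mask that get_causal_mask and non_cached_get_causal_mask construct:

import numpy as np

def causal_mask(canvas_size):
    # 1 where attention is allowed (j < i), 0 on and above the diagonal;
    # equivalent to the loop in get_causal_mask before the rate > 1 pooling.
    return np.tril(np.ones((canvas_size, canvas_size), dtype=np.float32), k=-1)

print(causal_mask(4))
# [[0. 0. 0. 0.]
#  [1. 0. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]]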
<import_from_stmt>codalab.worker.bundle_state State<import_from_stmt>freezegun freeze_time<import_from_stmt>tests.unit.server.bundle_manager BaseBundleManagerTest<class_stmt>BundleManagerScheduleRunBundlesTest(BaseBundleManagerTest)<block_start><def_stmt>test_no_bundles self<block_start>"""With no bundles available, nothing should happen."""<line_sep>self.bundle_manager._schedule_run_bundles()<block_end><def_stmt>test_no_workers self<block_start>"""When no workers are available, no bundles should be scheduled."""<line_sep>bundle=self.create_run_bundle()<line_sep>self.save_bundle(bundle)<line_sep>self.bundle_manager._schedule_run_bundles()<line_sep>bundle=self.bundle_manager._model.get_bundle(bundle.uuid)<line_sep>self.assertEqual(bundle.state State.CREATED)<block_end><def_stmt>test_stage_single_bundle self<block_start>"""When a worker with the right specs is available, a STAGED bundle should be scheduled and move to STARTING."""<line_sep>bundle=self.create_run_bundle(state=State.STAGED metadata=dict(request_memory="0" request_time="" request_cpus=1 request_gpus=0) )<line_sep>self.save_bundle(bundle)<line_sep>self.mock_worker_checkin(cpus=1 user_id=self.user_id)<line_sep>self.bundle_manager._schedule_run_bundles()<line_sep>bundle=self.bundle_manager._model.get_bundle(bundle.uuid)<line_sep>self.assertEqual(bundle.state State.STARTING)<block_end>@freeze_time("2020-02-01" as_kwarg='frozen_time')<def_stmt>test_cleanup_dead_workers self frozen_time<block_start>"""If workers don't check in for a long enough time period, they should be removed."""<line_sep>self.mock_worker_checkin(cpus=1 user_id=self.user_id)<line_sep>self.assertEqual(len(self.bundle_manager._worker_model.get_workers()) 1)<line_sep>frozen_time.move_to("2020-02-12")<line_sep>self.bundle_manager._schedule_run_bundles()<line_sep>self.assertEqual(len(self.bundle_manager._worker_model.get_workers()) 0)<block_end><def_stmt>test_restage_stuck_starting_bundles self<block_start>"""If no worker is currently running a STARTING bundle, it should be restaged."""<line_sep>bundle=self.create_run_bundle(State.STARTING)<line_sep>self.save_bundle(bundle)<line_sep>self.bundle_manager._schedule_run_bundles()<line_sep>bundle=self.bundle_manager._model.get_bundle(bundle.uuid)<line_sep>self.assertEqual(bundle.state State.STAGED)<block_end><def_stmt>test_bring_offline_stuck_running_bundles self<block_start>"""If no workers exist to claim a RUNNING bundle, it should go to the WORKER_OFFLINE state."""<line_sep>bundle=self.create_run_bundle(State.RUNNING)<line_sep>self.save_bundle(bundle)<line_sep>self.bundle_manager._schedule_run_bundles()<line_sep>bundle=self.bundle_manager._model.get_bundle(bundle.uuid)<line_sep>self.assertEqual(bundle.state State.WORKER_OFFLINE)<block_end><def_stmt>test_finalizing_bundle_goes_offline_if_no_worker_claims self<block_start>"""If no worker claims a FINALIZING bundle, it should go to the WORKER_OFFLINE state."""<line_sep>bundle=self.create_run_bundle(State.FINALIZING)<line_sep>self.save_bundle(bundle)<line_sep>self.bundle_manager._schedule_run_bundles()<line_sep>bundle=self.bundle_manager._model.get_bundle(bundle.uuid)<line_sep>self.assertEqual(bundle.state State.WORKER_OFFLINE)<block_end><def_stmt>test_finalizing_bundle_gets_finished self<block_start>"""If a worker checks in with a "finalizing" message, the bundle should transition through FINALIZING and end up in the READY state."""<line_sep>bundle=self.create_run_bundle(State.STAGED)<line_sep>self.save_bundle(bundle)<line_sep>worker_id=self.mock_worker_checkin(cpus=1 user_id=self.user_id)<line_sep># Bundle is assigned to
worker self.bundle_manager._schedule_run_bundles()<line_sep>bundle=self.bundle_manager._model.get_bundle(bundle.uuid)<line_sep>self.assertEqual(bundle.state State.STARTING)<line_sep># Worker sends back a "finalizing" message bundle.state=State.FINALIZING<line_sep>self.mock_bundle_checkin(bundle worker_id)<line_sep># Bundle is finished self.bundle_manager._schedule_run_bundles()<line_sep>bundle=self.bundle_manager._model.get_bundle(bundle.uuid)<line_sep>self.assertEqual(bundle.state State.READY)<line_sep>self.assertEqual(self.bundle_manager._model.get_bundle_metadata([bundle.uuid] "time_preparing")[bundle.uuid] '5' )<line_sep>self.assertEqual(self.bundle_manager._model.get_bundle_metadata([bundle.uuid] "time_running")[bundle.uuid] '5' )<line_sep>self.assertEqual(self.bundle_manager._model.get_bundle_metadata([bundle.uuid] "time_uploading_results")[bundle.uuid] '5' )<line_sep>self.assertEqual(self.bundle_manager._model.get_bundle_metadata([bundle.uuid] "time_cleaning_up")[bundle.uuid] '5' )<block_end><block_end>
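Read together, these tests trace the scheduler's bundle lifecycle. The mapping below is only a summary of the transitions asserted above (test documentation written for reference, not a CodaLab API):

from codalab.worker.bundle_state import State

SCHEDULER_TRANSITIONS = {
    State.CREATED: State.CREATED,         # no workers available: bundle stays put
    State.STAGED: State.STARTING,         # a worker with matching specs checked in
    State.STARTING: State.STAGED,         # no worker actually ran it: restage
    State.RUNNING: State.WORKER_OFFLINE,  # worker stopped checking in
    State.FINALIZING: State.READY,        # worker reported "finalizing": finish up
}
# A FINALIZING bundle that no worker claims instead goes to State.WORKER_OFFLINE.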
# -*- coding: utf-8 -*- """ Spectral Distributions of the Luminous Efficiency Functions =========================================================== Defines the spectral distributions of the luminous efficiency functions. The luminous efficiency data is in the form of a *dict* of :class:`colour.SpectralDistribution` classes as follows:: {'name': SpectralDistribution, ..., 'name': SpectralDistribution} The following luminous efficiency functions are available: - CIE 1924 Photopic Standard Observer - Judd Modified CIE 1951 Photopic Standard Observer - Judd-Vos Modified CIE 1978 Photopic Standard Observer - CIE 1964 Photopic 10 Degree Standard Observer - CIE 2008 2 Degree Physiologically Relevant LEF - CIE 2008 10 Degree Physiologically Relevant LEF - CIE 1951 Scotopic Standard Observer Notes ----- - The luminous efficiency functions are provided at 1 nm interval. - The mesopic luminous efficiency function is calculated using the *CIE 1924 Photopic Standard Observer* and *CIE 1951 Scotopic Standard Observer* luminous efficiency functions with the :func:`colour.sd_mesopic_luminous_efficiency_function` definition and the data from :attr:`colour.colorimetry.datasets.lefs.DATA_MESOPIC_X` attribute that defines weighting factors dependent on the photopic luminance :math:`L_p`. References ---------- - :cite:`CVRLq` : CVRL. (n.d.). Luminous efficiency. Retrieved April 19, 2014, from http://www.cvrl.org/lumindex.htm - :cite:`CVRLs` : CVRL. (n.d.). Older CIE Standards. Retrieved February 24, 2014, from http://cvrl.ioo.ucl.ac.uk/cie.htm - :cite:`Wikipedia2005d` : Wikipedia. (2005). Mesopic weighting function. Retrieved June 20, 2014, from http://en.wikipedia.org/wiki/Mesopic_vision#Mesopic_weighting_function """<import_from_stmt>functools partial<import_from_stmt>colour.colorimetry SpectralDistribution<import_from_stmt>colour.utilities CaseInsensitiveMapping LazyCaseInsensitiveMapping<line_sep>__author__='Colour Developers'<line_sep>__copyright__='Copyright (C) 2013-2021 - Colour Developers'<line_sep>__license__='New BSD License - https://opensource.org/licenses/BSD-3-Clause'<line_sep>__maintainer__='Colour Developers'<line_sep>__email__='<EMAIL>'<line_sep>__status__='Production'<line_sep>__all__=['DATA_LEFS_PHOTOPIC' 'SDS_LEFS_PHOTOPIC' 'DATA_LEFS_SCOTOPIC' 'SDS_LEFS_SCOTOPIC' 'SDS_LEFS' 'DATA_MESOPIC_X']<line_sep>DATA_LEFS_PHOTOPIC={'CIE 1924 Photopic Standard Observer':{360:0.0000039170000 361:0.0000043935810 362:0.0000049296040 363:0.0000055321360 364:0.0000062082450 365:0.0000069650000 366:0.0000078132190 367:0.0000087673360 368:0.0000098398440 369:0.0000110432300 370:0.0000123900000 371:0.0000138864100 372:0.0000155572800 373:0.0000174429600 374:0.0000195837500 375:0.0000220200000 376:0.0000248396500 377:0.0000280412600 378:0.0000315310400 379:0.0000352152100 380:0.0000390000000 381:0.0000428264000 382:0.0000469146000 383:0.0000515896000 384:0.0000571764000 385:0.0000640000000 386:0.0000723442100 387:0.0000822122400 388:0.0000935081600 389:0.0001061361000 390:0.0001200000000 391:0.0001349840000 392:0.0001514920000 393:0.0001702080000 394:0.0001918160000 395:0.0002170000000 396:0.0002469067000 397:0.0002812400000 398:0.0003185200000 399:0.0003572667000 400:0.0003960000000 401:0.0004337147000 402:0.0004730240000 403:0.0005178760000 404:0.0005722187000 405:0.0006400000000 406:0.0007245600000 407:0.0008255000000 408:0.0009411600000 409:0.0010698800000 410:0.0012100000000 411:0.0013620910000 412:0.0015307520000 413:0.0017203680000 414:0.0019353230000 415:0.0021800000000 416:0.0024548000000 
417:0.0027640000000 418:0.0031178000000 419:0.0035264000000 420:0.0040000000000 421:0.0045462400000 422:0.0051593200000 423:0.0058292800000 424:0.0065461600000 425:0.0073000000000 426:0.0080865070000 427:0.0089087200000 428:0.0097676800000 429:0.0106644300000 430:0.0116000000000 431:0.0125731700000 432:0.0135827200000 433:0.0146296800000 434:0.0157150900000 435:0.0168400000000 436:0.0180073600000 437:0.0192144800000 438:0.0204539200000 439:0.0217182400000 440:0.0230000000000 441:0.0242946100000 442:0.0256102400000 443:0.0269585700000 444:0.0283512500000 445:0.0298000000000 446:0.0313108300000 447:0.0328836800000 448:0.0345211200000 449:0.0362257100000 450:0.0380000000000 451:0.0398466700000 452:0.0417680000000 453:0.0437660000000 454:0.0458426700000 455:0.0480000000000 456:0.0502436800000 457:0.0525730400000 458:0.0549805600000 459:0.0574587200000 460:0.0600000000000 461:0.0626019700000 462:0.0652775200000 463:0.0680420800000 464:0.0709110900000 465:0.0739000000000 466:0.0770160000000 467:0.0802664000000 468:0.0836668000000 469:0.0872328000000 470:0.0909800000000 471:0.0949175500000 472:0.0990458400000 473:0.1033674000000 474:0.1078846000000 475:0.1126000000000 476:0.1175320000000 477:0.1226744000000 478:0.1279928000000 479:0.1334528000000 480:0.1390200000000 481:0.1446764000000 482:0.1504693000000 483:0.1564619000000 484:0.1627177000000 485:0.1693000000000 486:0.1762431000000 487:0.1835581000000 488:0.1912735000000 489:0.1994180000000 490:0.2080200000000 491:0.2171199000000 492:0.2267345000000 493:0.2368571000000 494:0.2474812000000 495:0.2586000000000 496:0.2701849000000 497:0.2822939000000 498:0.2950505000000 499:0.3085780000000 500:0.3230000000000 501:0.3384021000000 502:0.3546858000000 503:0.3716986000000 504:0.3892875000000 505:0.4073000000000 506:0.4256299000000 507:0.4443096000000 508:0.4633944000000 509:0.4829395000000 510:0.5030000000000 511:0.5235693000000 512:0.5445120000000 513:0.5656900000000 514:0.5869653000000 515:0.6082000000000 516:0.6293456000000 517:0.6503068000000 518:0.6708752000000 519:0.6908424000000 520:0.7100000000000 521:0.7281852000000 522:0.7454636000000 523:0.7619694000000 524:0.7778368000000 525:0.7932000000000 526:0.8081104000000 527:0.8224962000000 528:0.8363068000000 529:0.8494916000000 530:0.8620000000000 531:0.8738108000000 532:0.8849624000000 533:0.8954936000000 534:0.9054432000000 535:0.9148501000000 536:0.9237348000000 537:0.9320924000000 538:0.9399226000000 539:0.9472252000000 540:0.9540000000000 541:0.9602561000000 542:0.9660074000000 543:0.9712606000000 544:0.9760225000000 545:0.9803000000000 546:0.9840924000000 547:0.9874182000000 548:0.9903128000000 549:0.9928116000000 550:0.9949501000000 551:0.9967108000000 552:0.9980983000000 553:0.9991120000000 554:0.9997482000000 555:1.0000000000000 556:0.9998567000000 557:0.9993046000000 558:0.9983255000000 559:0.9968987000000 560:0.9950000000000 561:0.9926005000000 562:0.9897426000000 563:0.9864444000000 564:0.9827241000000 565:0.9786000000000 566:0.9740837000000 567:0.9691712000000 568:0.9638568000000 569:0.9581349000000 570:0.9520000000000 571:0.9454504000000 572:0.9384992000000 573:0.9311628000000 574:0.9234576000000 575:0.9154000000000 576:0.9070064000000 577:0.8982772000000 578:0.8892048000000 579:0.8797816000000 580:0.8700000000000 581:0.8598613000000 582:0.8493920000000 583:0.8386220000000 584:0.8275813000000 585:0.8163000000000 586:0.8047947000000 587:0.7930820000000 588:0.7811920000000 589:0.7691547000000 590:0.7570000000000 591:0.7447541000000 592:0.7324224000000 593:0.7200036000000 
594:0.7074965000000 595:0.6949000000000 596:0.6822192000000 597:0.6694716000000 598:0.6566744000000 599:0.6438448000000 600:0.6310000000000 601:0.6181555000000 602:0.6053144000000 603:0.5924756000000 604:0.5796379000000 605:0.5668000000000 606:0.5539611000000 607:0.5411372000000 608:0.5283528000000 609:0.5156323000000 610:0.5030000000000 611:0.4904688000000 612:0.4780304000000 613:0.4656776000000 614:0.4534032000000 615:0.4412000000000 616:0.4290800000000 617:0.4170360000000 618:0.4050320000000 619:0.3930320000000 620:0.3810000000000 621:0.3689184000000 622:0.3568272000000 623:0.3447768000000 624:0.3328176000000 625:0.3210000000000 626:0.3093381000000 627:0.2978504000000 628:0.2865936000000 629:0.2756245000000 630:0.2650000000000 631:0.2547632000000 632:0.2448896000000 633:0.2353344000000 634:0.2260528000000 635:0.2170000000000 636:0.2081616000000 637:0.1995488000000 638:0.1911552000000 639:0.1829744000000 640:0.1750000000000 641:0.1672235000000 642:0.1596464000000 643:0.1522776000000 644:0.1451259000000 645:0.1382000000000 646:0.1315003000000 647:0.1250248000000 648:0.1187792000000 649:0.1127691000000 650:0.1070000000000 651:0.1014762000000 652:0.0961886400000 653:0.0911229600000 654:0.0862648500000 655:0.0816000000000 656:0.0771206400000 657:0.0728255200000 658:0.0687100800000 659:0.0647697600000 660:0.0610000000000 661:0.0573962100000 662:0.0539550400000 663:0.0506737600000 664:0.0475496500000 665:0.0445800000000 666:0.0417587200000 667:0.0390849600000 668:0.0365638400000 669:0.0342004800000 670:0.0320000000000 671:0.0299626100000 672:0.0280766400000 673:0.0263293600000 674:0.0247080500000 675:0.0232000000000 676:0.0218007700000 677:0.0205011200000 678:0.0192810800000 679:0.0181206900000 680:0.0170000000000 681:0.0159037900000 682:0.0148371800000 683:0.0138106800000 684:0.0128347800000 685:0.0119200000000 686:0.0110683100000 687:0.0102733900000 688:0.0095333110000 689:0.0088461570000 690:0.0082100000000 691:0.0076237810000 692:0.0070854240000 693:0.0065914760000 694:0.0061384850000 695:0.0057230000000 696:0.0053430590000 697:0.0049957960000 698:0.0046764040000 699:0.0043800750000 700:0.0041020000000 701:0.0038384530000 702:0.0035890990000 703:0.0033542190000 704:0.0031340930000 705:0.0029290000000 706:0.0027381390000 707:0.0025598760000 708:0.0023932440000 709:0.0022372750000 710:0.0020910000000 711:0.0019535870000 712:0.0018245800000 713:0.0017035800000 714:0.0015901870000 715:0.0014840000000 716:0.0013844960000 717:0.0012912680000 718:0.0012040920000 719:0.0011227440000 720:0.0010470000000 721:0.0009765896000 722:0.0009111088000 723:0.0008501332000 724:0.0007932384000 725:0.0007400000000 726:0.0006900827000 727:0.0006433100000 728:0.0005994960000 729:0.0005584547000 730:0.0005200000000 731:0.0004839136000 732:0.0004500528000 733:0.0004183452000 734:0.0003887184000 735:0.0003611000000 736:0.0003353835000 737:0.0003114404000 738:0.0002891656000 739:0.0002684539000 740:0.0002492000000 741:0.0002313019000 742:0.0002146856000 743:0.0001992884000 744:0.0001850475000 745:0.0001719000000 746:0.0001597781000 747:0.0001486044000 748:0.0001383016000 749:0.0001287925000 750:0.0001200000000 751:0.0001118595000 752:0.0001043224000 753:0.0000973356000 754:0.0000908458700 755:0.0000848000000 756:0.0000791466700 757:0.0000738580000 758:0.0000689160000 759:0.0000643026700 760:0.0000600000000 761:0.0000559818700 762:0.0000522256000 763:0.0000487184000 764:0.0000454474700 765:0.0000424000000 766:0.0000395610400 767:0.0000369151200 768:0.0000344486800 769:0.0000321481600 770:0.0000300000000 
771:0.0000279912500 772:0.0000261135600 773:0.0000243602400 774:0.0000227246100 775:0.0000212000000 776:0.0000197785500 777:0.0000184528500 778:0.0000172168700 779:0.0000160645900 780:0.0000149900000 781:0.0000139872800 782:0.0000130515500 783:0.0000121781800 784:0.0000113625400 785:0.0000106000000 786:0.0000098858770 787:0.0000092173040 788:0.0000085923620 789:0.0000080091330 790:0.0000074657000 791:0.0000069595670 792:0.0000064879950 793:0.0000060486990 794:0.0000056393960 795:0.0000052578000 796:0.0000049017710 797:0.0000045697200 798:0.0000042601940 799:0.0000039717390 800:0.0000037029000 801:0.0000034521630 802:0.0000032183020 803:0.0000030003000 804:0.0000027971390 805:0.0000026078000 806:0.0000024312200 807:0.0000022665310 808:0.0000021130130 809:0.0000019699430 810:0.0000018366000 811:0.0000017122300 812:0.0000015962280 813:0.0000014880900 814:0.0000013873140 815:0.0000012934000 816:0.0000012058200 817:0.0000011241430 818:0.0000010480090 819:0.0000009770578 820:0.0000009109300 821:0.0000008492513 822:0.0000007917212 823:0.0000007380904 824:0.0000006881098 825:0.0000006415300 826:0.0000005980895 827:0.0000005575746 828:0.0000005198080 829:0.0000004846123 830:0.0000004518100} 'Judd Modified CIE 1951 Photopic Standard Observer':{370:0.0001 380:0.0004 390:0.0015 400:0.0045 410:0.0093 420:0.0175 430:0.0273 440:0.0379 450:0.0468 460:0.0600 470:0.0910 480:0.1390 490:0.2080 500:0.3230 510:0.5030 520:0.7100 530:0.8620 540:0.9540 550:0.9950 560:0.9950 570:0.9520 580:0.8700 590:0.7570 600:0.6310 610:0.5030 620:0.3810 630:0.2650 640:0.1750 650:0.1070 660:0.0610 670:0.0320 680:0.0170 690:0.0082 700:0.0041 710:0.0021 720:0.0011 730:0.0005 740:0.0002 750:0.0001 760:0.0001 770:0.0000} 'Judd-Vos Modified CIE 1978 Photopic Standard Observer':{380:0.0002000000 381:0.0002282100 382:0.0002610900 383:0.0002993600 384:0.0003438700 385:0.0003955600 386:0.0004554400 387:0.0005246200 388:0.0006042800 389:0.0006956500 390:0.0008000000 391:0.0009163500 392:0.0010477000 393:0.0011955000 394:0.0013611000 395:0.0015457000 396:0.0017508000 397:0.0018776000 398:0.0022273000 399:0.0025011000 400:0.0028000000 401:0.0031159000 402:0.0034576000 403:0.0038268000 404:0.0042256000 405:0.0046562000 406:0.0051216000 407:0.0056248000 408:0.0061695000 409:0.0067597000 410:0.0074000000 411:0.0081451000 412:0.0089555000 413:0.0098322000 414:0.0107740000 415:0.0117790000 416:0.0128420000 417:0.0139560000 418:0.0151110000 419:0.0162970000 420:0.0175000000 421:0.0185820000 422:0.0196450000 423:0.0206830000 424:0.0216940000 425:0.0226780000 426:0.0236360000 427:0.0245720000 428:0.0254900000 429:0.0263970000 430:0.0273000000 431:0.0283350000 432:0.0293830000 433:0.0304420000 434:0.0315100000 435:0.0325840000 436:0.0336610000 437:0.0347350000 438:0.0358030000 439:0.0368600000 440:0.0379000000 441:0.0388380000 442:0.0397520000 443:0.0406460000 444:0.0415240000 445:0.0423910000 446:0.0432520000 447:0.0441160000 448:0.0449900000 449:0.0458810000 450:0.0468000000 451:0.0477430000 452:0.0487330000 453:0.0497850000 454:0.0509100000 455:0.0521220000 456:0.0534350000 457:0.0548640000 458:0.0564240000 459:0.0581310000 460:0.0600000000 461:0.0626019700 462:0.0652775200 463:0.0680420800 464:0.0709110900 465:0.0739000000 466:0.0770160000 467:0.0802664000 468:0.0836668000 469:0.0872328000 470:0.0909800000 471:0.0949175500 472:0.0990458400 473:0.1033674000 474:0.1078846000 475:0.1126000000 476:0.1175320000 477:0.1226744000 478:0.1279928000 479:0.1334528000 480:0.1390200000 481:0.1446764000 482:0.1504693000 483:0.1564619000 484:0.1627177000 
485:0.1693000000 486:0.1762431000 487:0.1835581000 488:0.1912735000 489:0.1994180000 490:0.2080200000 491:0.2171199000 492:0.2267345000 493:0.2368571000 494:0.2474812000 495:0.2586000000 496:0.2701849000 497:0.2822939000 498:0.2950505000 499:0.3085780000 500:0.3230000000 501:0.3384021000 502:0.3546858000 503:0.3716986000 504:0.3892875000 505:0.4073000000 506:0.4256299000 507:0.4443096000 508:0.4633944000 509:0.4829395000 510:0.5030000000 511:0.5235693000 512:0.5445120000 513:0.5656900000 514:0.5869653000 515:0.6082000000 516:0.6293456000 517:0.6503068000 518:0.6708752000 519:0.6908424000 520:0.7100000000 521:0.7281852000 522:0.7454636000 523:0.7619694000 524:0.7778368000 525:0.7932000000 526:0.8081104000 527:0.8224962000 528:0.8363068000 529:0.8494916000 530:0.8620000000 531:0.8738108000 532:0.8849624000 533:0.8954936000 534:0.9054432000 535:0.9148501000 536:0.9237348000 537:0.9320924000 538:0.9399226000 539:0.9472252000 540:0.9540000000 541:0.9602561000 542:0.9660074000 543:0.9712606000 544:0.9760225000 545:0.9803000000 546:0.9840924000 547:0.9874182000 548:0.9903128000 549:0.9928116000 550:0.9949501000 551:0.9967108000 552:0.9980983000 553:0.9991120000 554:0.9997482000 555:1.0000000000 556:0.9998567000 557:0.9993046000 558:0.9983255000 559:0.9968987000 560:0.9950000000 561:0.9926005000 562:0.9897426000 563:0.9864444000 564:0.9827241000 565:0.9786000000 566:0.9740837000 567:0.9691712000 568:0.9638568000 569:0.9581349000 570:0.9520000000 571:0.9454504000 572:0.9384992000 573:0.9311628000 574:0.9234576000 575:0.9154000000 576:0.9070064000 577:0.8982772000 578:0.8892048000 579:0.8797816000 580:0.8700000000 581:0.8598613000 582:0.8493920000 583:0.8386220000 584:0.8275813000 585:0.8163000000 586:0.8047947000 587:0.7930820000 588:0.7811920000 589:0.7691547000 590:0.7570000000 591:0.7447541000 592:0.7324224000 593:0.7200036000 594:0.7074965000 595:0.6949000000 596:0.6822192000 597:0.6694716000 598:0.6566744000 599:0.6438448000 600:0.6310000000 601:0.6181555000 602:0.6053144000 603:0.5924756000 604:0.5796379000 605:0.5668000000 606:0.5539611000 607:0.5411372000 608:0.5283528000 609:0.5156323000 610:0.5030000000 611:0.4904688000 612:0.4780304000 613:0.4656776000 614:0.4534032000 615:0.4412000000 616:0.4290800000 617:0.4170360000 618:0.4050320000 619:0.3930320000 620:0.3810000000 621:0.3689184000 622:0.3568272000 623:0.3447768000 624:0.3328176000 625:0.3210000000 626:0.3093381000 627:0.2978504000 628:0.2865936000 629:0.2756245000 630:0.2650000000 631:0.2547632000 632:0.2448896000 633:0.2353344000 634:0.2260528000 635:0.2170000000 636:0.2081616000 637:0.1995488000 638:0.1911552000 639:0.1829744000 640:0.1750000000 641:0.1672235000 642:0.1596464000 643:0.1522776000 644:0.1451259000 645:0.1382000000 646:0.1315003000 647:0.1250248000 648:0.1187792000 649:0.1127691000 650:0.1070000000 651:0.1014762000 652:0.0961886400 653:0.0911229600 654:0.0862648500 655:0.0816000000 656:0.0771206400 657:0.0728255200 658:0.0687100800 659:0.0647697600 660:0.0610000000 661:0.0573962100 662:0.0539550400 663:0.0506737600 664:0.0475496500 665:0.0445800000 666:0.0417587200 667:0.0390849600 668:0.0365638400 669:0.0342004800 670:0.0320000000 671:0.0299626100 672:0.0280766400 673:0.0263293600 674:0.0247080500 675:0.0232000000 676:0.0218007700 677:0.0205011200 678:0.0192810800 679:0.0181206900 680:0.0170000000 681:0.0159037900 682:0.0148371800 683:0.0138106800 684:0.0128347800 685:0.0119200000 686:0.0110683100 687:0.0102733900 688:0.0095333110 689:0.0088461570 690:0.0082100000 691:0.0076237810 692:0.0070854240 693:0.0065914760 
694:0.0061384850 695:0.0057230000 696:0.0053430590 697:0.0049957960 698:0.0046764040 699:0.0043800750 700:0.0041020000 701:0.0038384530 702:0.0035890990 703:0.0033542190 704:0.0031340930 705:0.0029290000 706:0.0027381390 707:0.0025598760 708:0.0023932440 709:0.0022372750 710:0.0020910000 711:0.0019535870 712:0.0018245800 713:0.0017035800 714:0.0015901870 715:0.0014840000 716:0.0013844960 717:0.0012912680 718:0.0012040920 719:0.0011227440 720:0.0010470000 721:0.0009765896 722:0.0009111088 723:0.0008501332 724:0.0007932384 725:0.0007400000 726:0.0006900827 727:0.0006433100 728:0.0005994960 729:0.0005584547 730:0.0005200000 731:0.0004839136 732:0.0004500528 733:0.0004183452 734:0.0003887184 735:0.0003611000 736:0.0003353835 737:0.0003114404 738:0.0002891656 739:0.0002684539 740:0.0002492000 741:0.0002313019 742:0.0002146856 743:0.0001992884 744:0.0001850475 745:0.0001719000 746:0.0001597781 747:0.0001486044 748:0.0001383016 749:0.0001287925 750:0.0001200000 751:0.0001118595 752:0.0001043224 753:0.0000973356 754:0.0000908459 755:0.0000848000 756:0.0000791467 757:0.0000738580 758:0.0000689160 759:0.0000643027 760:0.0000600000 761:0.0000559819 762:0.0000522256 763:0.0000487184 764:0.0000454475 765:0.0000424000 766:0.0000395610 767:0.0000369151 768:0.0000344487 769:0.0000321482 770:0.0000300000 771:0.0000279913 772:0.0000261136 773:0.0000243602 774:0.0000227246 775:0.0000212000 776:0.0000197786 777:0.0000184529 778:0.0000172169 779:0.0000160646 780:0.0000149900} 'CIE 1964 Photopic 10 Degree Standard Observer':{360:0.000000013398 361:0.000000020294 362:0.000000030560 363:0.000000045740 364:0.000000068050 365:0.000000100650 366:0.000000147980 367:0.000000216270 368:0.000000314200 369:0.000000453700 370:0.000000651100 371:0.000000928800 372:0.000001317500 373:0.000001857200 374:0.000002602000 375:0.000003625000 376:0.000005019000 377:0.000006907000 378:0.000009449000 379:0.000012848000 380:0.000017364000 381:0.000023327000 382:0.000031150000 383:0.000041350000 384:0.000054560000 385:0.000071560000 386:0.000093300000 387:0.000120870000 388:0.000155640000 389:0.000199200000 390:0.000253400000 391:0.000320200000 392:0.000402400000 393:0.000502300000 394:0.000623200000 395:0.000768500000 396:0.000941700000 397:0.001147800000 398:0.001390300000 399:0.001674000000 400:0.002004400000 401:0.002386000000 402:0.002822000000 403:0.003319000000 404:0.003880000000 405:0.004509000000 406:0.005209000000 407:0.005985000000 408:0.006833000000 409:0.007757000000 410:0.008756000000 411:0.009816000000 412:0.010918000000 413:0.012058000000 414:0.013237000000 415:0.014456000000 416:0.015717000000 417:0.017025000000 418:0.018399000000 419:0.019848000000 420:0.021391000000 421:0.022992000000 422:0.024598000000 423:0.026213000000 424:0.027841000000 425:0.029497000000 426:0.031195000000 427:0.032927000000 428:0.034738000000 429:0.036654000000 430:0.038676000000 431:0.040792000000 432:0.042946000000 433:0.045114000000 434:0.047333000000 435:0.049602000000 436:0.051934000000 437:0.054337000000 438:0.056822000000 439:0.059399000000 440:0.062077000000 441:0.064737000000 442:0.067285000000 443:0.069764000000 444:0.072218000000 445:0.074704000000 446:0.077272000000 447:0.079979000000 448:0.082874000000 449:0.086000000000 450:0.089456000000 451:0.092947000000 452:0.096275000000 453:0.099535000000 454:0.102829000000 455:0.106256000000 456:0.109901000000 457:0.113835000000 458:0.118167000000 459:0.122932000000 460:0.128201000000 461:0.133457000000 462:0.138323000000 463:0.143042000000 464:0.147787000000 465:0.152761000000 
466:0.158102000000 467:0.163941000000 468:0.170362000000 469:0.177425000000 470:0.185190000000 471:0.193025000000 472:0.200313000000 473:0.207156000000 474:0.213644000000 475:0.219940000000 476:0.226170000000 477:0.232467000000 478:0.239025000000 479:0.245997000000 480:0.253589000000 481:0.261876000000 482:0.270643000000 483:0.279645000000 484:0.288694000000 485:0.297665000000 486:0.306469000000 487:0.315035000000 488:0.323335000000 489:0.331366000000 490:0.339133000000 491:0.347860000000 492:0.358326000000 493:0.370001000000 494:0.382464000000 495:0.395379000000 496:0.408482000000 497:0.421588000000 498:0.434619000000 499:0.447601000000 500:0.460777000000 501:0.474340000000 502:0.488200000000 503:0.502340000000 504:0.516740000000 505:0.531360000000 506:0.546190000000 507:0.561180000000 508:0.576290000000 509:0.591500000000 510:0.606741000000 511:0.622150000000 512:0.637830000000 513:0.653710000000 514:0.669680000000 515:0.685660000000 516:0.701550000000 517:0.717230000000 518:0.732570000000 519:0.747460000000 520:0.761757000000 521:0.775340000000 522:0.788220000000 523:0.800460000000 524:0.812140000000 525:0.823330000000 526:0.834120000000 527:0.844600000000 528:0.854870000000 529:0.865040000000 530:0.875211000000 531:0.885370000000 532:0.895370000000 533:0.905150000000 534:0.914650000000 535:0.923810000000 536:0.932550000000 537:0.940810000000 538:0.948520000000 539:0.955600000000 540:0.961988000000 541:0.967540000000 542:0.972230000000 543:0.976170000000 544:0.979460000000 545:0.982200000000 546:0.984520000000 547:0.986520000000 548:0.988320000000 549:0.990020000000 550:0.991761000000 551:0.993530000000 552:0.995230000000 553:0.996770000000 554:0.998090000000 555:0.999110000000 556:0.999770000000 557:1.000000000000 558:0.999710000000 559:0.998850000000 560:0.997340000000 561:0.995260000000 562:0.992740000000 563:0.989750000000 564:0.986300000000 565:0.982380000000 566:0.977980000000 567:0.973110000000 568:0.967740000000 569:0.961890000000 570:0.955552000000 571:0.948601000000 572:0.940981000000 573:0.932798000000 574:0.924158000000 575:0.915175000000 576:0.905954000000 577:0.896608000000 578:0.887249000000 579:0.877986000000 580:0.868934000000 581:0.860164000000 582:0.851519000000 583:0.842963000000 584:0.834393000000 585:0.825623000000 586:0.816764000000 587:0.807544000000 588:0.797947000000 589:0.787893000000 590:0.777405000000 591:0.766490000000 592:0.755309000000 593:0.743845000000 594:0.732190000000 595:0.720353000000 596:0.708281000000 597:0.696055000000 598:0.683621000000 599:0.671048000000 600:0.658341000000 601:0.645545000000 602:0.632718000000 603:0.619815000000 604:0.606887000000 605:0.593878000000 606:0.580781000000 607:0.567653000000 608:0.554490000000 609:0.541228000000 610:0.527963000000 611:0.514634000000 612:0.501363000000 613:0.488124000000 614:0.474935000000 615:0.461834000000 616:0.448823000000 617:0.435917000000 618:0.423153000000 619:0.410526000000 620:0.398057000000 621:0.385835000000 622:0.373951000000 623:0.362311000000 624:0.350863000000 625:0.339554000000 626:0.328309000000 627:0.317118000000 628:0.305936000000 629:0.294737000000 630:0.283493000000 631:0.272222000000 632:0.260990000000 633:0.249877000000 634:0.238946000000 635:0.228254000000 636:0.217853000000 637:0.207780000000 638:0.198072000000 639:0.188748000000 640:0.179828000000 641:0.171285000000 642:0.163059000000 643:0.155151000000 644:0.147535000000 645:0.140211000000 646:0.133170000000 647:0.126400000000 648:0.119892000000 649:0.113640000000 650:0.107633000000 651:0.101870000000 652:0.096347000000 
653:0.091063000000 654:0.086010000000 655:0.081187000000 656:0.076583000000 657:0.072198000000 658:0.068024000000 659:0.064052000000 660:0.060281000000 661:0.056697000000 662:0.053292000000 663:0.050059000000 664:0.046998000000 665:0.044096000000 666:0.041345000000 667:0.038750700000 668:0.036297800000 669:0.033983200000 670:0.031800400000 671:0.029739500000 672:0.027791800000 673:0.025955100000 674:0.024226300000 675:0.022601700000 676:0.021077900000 677:0.019650500000 678:0.018315300000 679:0.017068600000 680:0.015905100000 681:0.014818300000 682:0.013800800000 683:0.012849500000 684:0.011960700000 685:0.011130300000 686:0.010355500000 687:0.009633200000 688:0.008959900000 689:0.008332400000 690:0.007748800000 691:0.007204600000 692:0.006697500000 693:0.006225100000 694:0.005785000000 695:0.005375100000 696:0.004994100000 697:0.004639200000 698:0.004309300000 699:0.004002800000 700:0.003717740000 701:0.003452620000 702:0.003205830000 703:0.002976230000 704:0.002762810000 705:0.002564560000 706:0.002380480000 707:0.002209710000 708:0.002051320000 709:0.001904490000 710:0.001768470000 711:0.001642360000 712:0.001525350000 713:0.001416720000 714:0.001315950000 715:0.001222390000 716:0.001135550000 717:0.001054940000 718:0.000980140000 719:0.000910660000 720:0.000846190000 721:0.000786290000 722:0.000730680000 723:0.000678990000 724:0.000631010000 725:0.000586440000 726:0.000545110000 727:0.000506720000 728:0.000471110000 729:0.000438050000 730:0.000407410000 731:0.000378962000 732:0.000352543000 733:0.000328001000 734:0.000305208000 735:0.000284041000 736:0.000264375000 737:0.000246109000 738:0.000229143000 739:0.000213376000 740:0.000198730000 741:0.000185115000 742:0.000172454000 743:0.000160678000 744:0.000149730000 745:0.000139550000 746:0.000130086000 747:0.000121290000 748:0.000113106000 749:0.000105501000 750:0.000098428000 751:0.000091853000 752:0.000085738000 753:0.000080048000 754:0.000074751000 755:0.000069819000 756:0.000065222000 757:0.000060939000 758:0.000056942000 759:0.000053217000 760:0.000049737000 761:0.000046491000 762:0.000043464000 763:0.000040635000 764:0.000038000000 765:0.000035540500 766:0.000033244800 767:0.000031100600 768:0.000029099000 769:0.000027230700 770:0.000025486000 771:0.000023856100 772:0.000022333200 773:0.000020910400 774:0.000019580800 775:0.000018338400 776:0.000017177700 777:0.000016093400 778:0.000015080000 779:0.000014133600 780:0.000013249000 781:0.000012422600 782:0.000011649900 783:0.000010927700 784:0.000010251900 785:0.000009619600 786:0.000009028100 787:0.000008474000 788:0.000007954800 789:0.000007468600 790:0.000007012800 791:0.000006585800 792:0.000006185700 793:0.000005810700 794:0.000005459000 795:0.000005129800 796:0.000004820600 797:0.000004531200 798:0.000004259100 799:0.000004004200 800:0.000003764730 801:0.000003539950 802:0.000003329140 803:0.000003131150 804:0.000002945290 805:0.000002770810 806:0.000002607050 807:0.000002453290 808:0.000002308940 809:0.000002173380 810:0.000002046130 811:0.000001926620 812:0.000001814400 813:0.000001708950 814:0.000001609880 815:0.000001516770 816:0.000001429210 817:0.000001346860 818:0.000001269450 819:0.000001196620 820:0.000001128090 821:0.000001063680 822:0.000001003130 823:0.000000946220 824:0.000000892630 825:0.000000842160 826:0.000000794640 827:0.000000749780 828:0.000000707440 829:0.000000667480 830:0.000000629700} 'CIE 2008 2 Degree Physiologically Relevant LEF':{390:4.14616e-04 391:5.02833e-04 392:6.08499e-04 393:7.34444e-04 394:8.83739e-04 395:1.05965e-03 396:1.26553e-03 
397:1.50475e-03 398:1.78049e-03 399:2.09557e-03 400:2.45219e-03 401:2.85222e-03 402:3.29912e-03 403:3.79747e-03 404:4.35277e-03 405:4.97172e-03 406:5.66101e-03 407:6.42161e-03 408:7.25031e-03 409:8.14017e-03 410:9.07986e-03 411:1.00561e-02 412:1.10646e-02 413:1.21052e-02 414:1.31801e-02 415:1.42938e-02 416:1.54500e-02 417:1.66409e-02 418:1.78530e-02 419:1.90702e-02 420:2.02737e-02 421:2.14481e-02 422:2.26004e-02 423:2.37479e-02 424:2.49125e-02 425:2.61211e-02 426:2.73992e-02 427:2.87499e-02 428:3.01691e-02 429:3.16514e-02 430:3.31904e-02 431:3.47791e-02 432:3.64149e-02 433:3.80957e-02 434:3.98184e-02 435:4.15794e-02 436:4.33710e-02 437:4.51718e-02 438:4.69542e-02 439:4.86872e-02 440:5.03366e-02 441:5.18761e-02 442:5.33222e-02 443:5.47060e-02 444:5.60634e-02 445:5.74339e-02 446:5.88511e-02 447:6.03081e-02 448:6.17864e-02 449:6.32657e-02 450:6.47235e-02 451:6.61475e-02 452:6.75726e-02 453:6.90493e-02 454:7.06328e-02 455:7.23834e-02 456:7.43596e-02 457:7.65938e-02 458:7.91144e-02 459:8.19535e-02 460:8.51482e-02 461:8.87266e-02 462:9.26601e-02 463:9.68972e-02 464:1.01375e-01 465:1.06014e-01 466:1.10738e-01 467:1.15511e-01 468:1.20312e-01 469:1.25116e-01 470:1.29896e-01 471:1.34630e-01 472:1.39331e-01 473:1.44023e-01 474:1.48737e-01 475:1.53507e-01 476:1.58364e-01 477:1.63320e-01 478:1.68376e-01 479:1.73537e-01 480:1.78805e-01 481:1.84182e-01 482:1.89656e-01 483:1.95210e-01 484:2.00826e-01 485:2.06483e-01 486:2.12183e-01 487:2.18028e-01 488:2.24159e-01 489:2.30730e-01 490:2.37916e-01 491:2.45871e-01 492:2.54602e-01 493:2.64076e-01 494:2.74249e-01 495:2.85068e-01 496:2.96484e-01 497:3.08501e-01 498:3.21139e-01 499:3.34418e-01 500:3.48354e-01 501:3.62960e-01 502:3.78228e-01 503:3.94136e-01 504:4.10658e-01 505:4.27760e-01 506:4.45399e-01 507:4.63540e-01 508:4.82138e-01 509:5.01143e-01 510:5.20497e-01 511:5.40139e-01 512:5.60021e-01 513:5.80097e-01 514:6.00317e-01 515:6.20626e-01 516:6.40940e-01 517:6.61077e-01 518:6.80813e-01 519:6.99904e-01 520:7.18089e-01 521:7.35159e-01 522:7.51182e-01 523:7.66314e-01 524:7.80735e-01 525:7.94645e-01 526:8.08207e-01 527:8.21382e-01 528:8.34070e-01 529:8.46171e-01 530:8.57580e-01 531:8.68241e-01 532:8.78306e-01 533:8.87991e-01 534:8.97521e-01 535:9.07135e-01 536:9.16995e-01 537:9.26929e-01 538:9.36673e-01 539:9.45948e-01 540:9.54468e-01 541:9.61983e-01 542:9.68439e-01 543:9.73829e-01 544:9.78152e-01 545:9.81411e-01 546:9.83667e-01 547:9.85208e-01 548:9.86381e-01 549:9.87536e-01 550:9.89023e-01 551:9.91081e-01 552:9.93491e-01 553:9.95917e-01 554:9.98021e-01 555:9.99461e-01 556:9.99993e-01 557:9.99756e-01 558:9.98984e-01 559:9.97912e-01 560:9.96774e-01 561:9.95736e-01 562:9.94711e-01 563:9.93553e-01 564:9.92116e-01 565:9.90255e-01 566:9.87860e-01 567:9.84932e-01 568:9.81504e-01 569:9.77603e-01 570:9.73261e-01 571:9.68476e-01 572:9.63137e-01 573:9.57106e-01 574:9.50254e-01 575:9.42457e-01 576:9.33690e-01 577:9.24289e-01 578:9.14671e-01 579:9.05233e-01 580:8.96361e-01 581:8.88307e-01 582:8.80846e-01 583:8.73645e-01 584:8.66376e-01 585:8.58720e-01 586:8.50430e-01 587:8.41505e-01 588:8.32011e-01 589:8.22015e-01 590:8.11587e-01 591:8.00787e-01 592:7.89652e-01 593:7.78205e-01 594:7.66473e-01 595:7.54479e-01 596:7.42247e-01 597:7.29823e-01 598:7.17252e-01 599:7.04582e-01 600:6.91855e-01 601:6.79101e-01 602:6.66285e-01 603:6.53359e-01 604:6.40281e-01 605:6.27007e-01 606:6.13515e-01 607:5.99849e-01 608:5.86068e-01 609:5.72226e-01 610:5.58375e-01 611:5.44554e-01 612:5.30767e-01 613:5.17013e-01 614:5.03289e-01 615:4.89595e-01 616:4.75944e-01 617:4.62396e-01 618:4.49015e-01 
619:4.35862e-01 620:4.22990e-01 621:4.10415e-01 622:3.98036e-01 623:3.85730e-01 624:3.73391e-01 625:3.60924e-01 626:3.48286e-01 627:3.35570e-01 628:3.22896e-01 629:3.10370e-01 630:2.98086e-01 631:2.86116e-01 632:2.74482e-01 633:2.63195e-01 634:2.52263e-01 635:2.41690e-01 636:2.31481e-01 637:2.21638e-01 638:2.12162e-01 639:2.03054e-01 640:1.94312e-01 641:1.85923e-01 642:1.77827e-01 643:1.69965e-01 644:1.62284e-01 645:1.54740e-01 646:1.47308e-01 647:1.40017e-01 648:1.32901e-01 649:1.25991e-01 650:1.19312e-01 651:1.12882e-01 652:1.06711e-01 653:1.00805e-01 654:9.51665e-02 655:8.97959e-02 656:8.46904e-02 657:7.98401e-02 658:7.52337e-02 659:7.08606e-02 660:6.67104e-02 661:6.27736e-02 662:5.90418e-02 663:5.55070e-02 664:5.21614e-02 665:4.89970e-02 666:4.60058e-02 667:4.31788e-02 668:4.05075e-02 669:3.79838e-02 670:3.55998e-02 671:3.33486e-02 672:3.12233e-02 673:2.92178e-02 674:2.73260e-02 675:2.55422e-02 676:2.38612e-02 677:2.22786e-02 678:2.07902e-02 679:1.93919e-02 680:1.80794e-02 681:1.68482e-02 682:1.56919e-02 683:1.46045e-02 684:1.35806e-02 685:1.26157e-02 686:1.17070e-02 687:1.08561e-02 688:1.00648e-02 689:9.33338e-03 690:8.66128e-03 691:8.04605e-03 692:7.48113e-03 693:6.95999e-03 694:6.47707e-03 695:6.02768e-03 696:5.60817e-03 697:5.21669e-03 698:4.85179e-03 699:4.51201e-03 700:4.19594e-03 701:3.90206e-03 702:3.62837e-03 703:3.37301e-03 704:3.13432e-03 705:2.91086e-03 706:2.70153e-03 707:2.50580e-03 708:2.32323e-03 709:2.15333e-03 710:1.99556e-03 711:1.84932e-03 712:1.71398e-03 713:1.58890e-03 714:1.47345e-03 715:1.36702e-03 716:1.26895e-03 717:1.17842e-03 718:1.09464e-03 719:1.01694e-03 720:9.44727e-04 721:8.77517e-04 722:8.15044e-04 723:7.57076e-04 724:7.03376e-04 725:6.53705e-04 726:6.07805e-04 727:5.65344e-04 728:5.26005e-04 729:4.89506e-04 730:4.55597e-04 731:4.24055e-04 732:3.94686e-04 733:3.67318e-04 734:3.41794e-04 735:3.17974e-04 736:2.95744e-04 737:2.75056e-04 738:2.55864e-04 739:2.38114e-04 740:2.21745e-04 741:2.06671e-04 742:1.92747e-04 743:1.79831e-04 744:1.67802e-04 745:1.56557e-04 746:1.46017e-04 747:1.36153e-04 748:1.26945e-04 749:1.18367e-04 750:1.10393e-04 751:1.02991e-04 752:9.61184e-05 753:8.97332e-05 754:8.37969e-05 755:7.82744e-05 756:7.31331e-05 757:6.83414e-05 758:6.38704e-05 759:5.96939e-05 760:5.57886e-05 761:5.21351e-05 762:4.87218e-05 763:4.55385e-05 764:4.25744e-05 765:3.98188e-05 766:3.72588e-05 767:3.48747e-05 768:3.26477e-05 769:3.05614e-05 770:2.86018e-05 771:2.67584e-05 772:2.50294e-05 773:2.34137e-05 774:2.19091e-05 775:2.05126e-05 776:1.92190e-05 777:1.80180e-05 778:1.68990e-05 779:1.58531e-05 780:1.48724e-05 781:1.39509e-05 782:1.30853e-05 783:1.22733e-05 784:1.15123e-05 785:1.08000e-05 786:1.01336e-05 787:9.50992e-06 788:8.92563e-06 789:8.37785e-06 790:7.86392e-06 791:7.38154e-06 792:6.92910e-06 793:6.50514e-06 794:6.10822e-06 795:5.73694e-06 796:5.38983e-06 797:5.06527e-06 798:4.76167e-06 799:4.47756e-06 800:4.21160e-06 801:3.96246e-06 802:3.72867e-06 803:3.50888e-06 804:3.30187e-06 805:3.10656e-06 806:2.92212e-06 807:2.74821e-06 808:2.58456e-06 809:2.43087e-06 810:2.28679e-06 811:2.15191e-06 812:2.02566e-06 813:1.90746e-06 814:1.79679e-06 815:1.69315e-06 816:1.59603e-06 817:1.50490e-06 818:1.41925e-06 819:1.33860e-06 820:1.26256e-06 821:1.19077e-06 822:1.12303e-06 823:1.05915e-06 824:9.98951e-07 825:9.42251e-07 826:8.88880e-07 827:8.38669e-07 828:7.91454e-07 829:7.47077e-07 830:7.05386e-07} 'CIE 2008 10 Degree Physiologically Relevant LEF':{390:4.07678e-04 391:4.97777e-04 392:6.06475e-04 393:7.37004e-04 394:8.92939e-04 395:1.07817e-03 
396:1.29682e-03 397:1.55316e-03 398:1.85146e-03 399:2.19579e-03 400:2.58977e-03 401:3.03680e-03 402:3.54193e-03 403:4.11142e-03 404:4.75262e-03 405:5.47421e-03 406:6.28503e-03 407:7.18807e-03 408:8.18179e-03 409:9.26042e-03 410:1.04130e-02 411:1.16264e-02 412:1.28988e-02 413:1.42344e-02 414:1.56408e-02 415:1.71297e-02 416:1.87127e-02 417:2.03839e-02 418:2.21294e-02 419:2.39299e-02 420:2.57613e-02 421:2.76016e-02 422:2.94551e-02 423:3.13388e-02 424:3.32758e-02 425:3.52955e-02 426:3.74271e-02 427:3.96714e-02 428:4.20200e-02 429:4.44617e-02 430:4.69823e-02 431:4.95674e-02 432:5.22122e-02 433:5.49139e-02 434:5.76692e-02 435:6.04743e-02 436:6.33220e-02 437:6.61927e-02 438:6.90619e-02 439:7.19019e-02 440:7.46829e-02 441:7.73845e-02 442:8.00360e-02 443:8.26852e-02 444:8.53875e-02 445:8.82054e-02 446:9.11893e-02 447:9.43104e-02 448:9.75135e-02 449:1.00735e-01 450:1.03903e-01 451:1.06964e-01 452:1.09968e-01 453:1.12999e-01 454:1.16154e-01 455:1.19539e-01 456:1.23250e-01 457:1.27305e-01 458:1.31696e-01 459:1.36418e-01 460:1.41459e-01 461:1.46800e-01 462:1.52400e-01 463:1.58202e-01 464:1.64140e-01 465:1.70137e-01 466:1.76123e-01 467:1.82090e-01 468:1.88046e-01 469:1.94006e-01 470:1.99986e-01 471:2.06005e-01 472:2.12098e-01 473:2.18304e-01 474:2.24669e-01 475:2.31243e-01 476:2.38074e-01 477:2.45180e-01 478:2.52568e-01 479:2.60248e-01 480:2.68227e-01 481:2.76501e-01 482:2.85004e-01 483:2.93647e-01 484:3.02332e-01 485:3.10944e-01 486:3.19410e-01 487:3.27868e-01 488:3.36526e-01 489:3.45618e-01 490:3.55402e-01 491:3.66089e-01 492:3.77586e-01 493:3.89696e-01 494:4.02195e-01 495:4.14823e-01 496:4.27354e-01 497:4.39821e-01 498:4.52336e-01 499:4.65030e-01 500:4.78048e-01 501:4.91517e-01 502:5.05422e-01 503:5.19706e-01 504:5.34301e-01 505:5.49134e-01 506:5.64130e-01 507:5.79242e-01 508:5.94426e-01 509:6.09639e-01 510:6.24830e-01 511:6.39966e-01 512:6.55094e-01 513:6.70290e-01 514:6.85638e-01 515:7.01229e-01 516:7.17110e-01 517:7.33092e-01 518:7.48904e-01 519:7.64253e-01 520:7.78820e-01 521:7.92341e-01 522:8.04851e-01 523:8.16475e-01 524:8.27352e-01 525:8.37636e-01 526:8.47465e-01 527:8.56887e-01 528:8.65924e-01 529:8.74604e-01 530:8.82955e-01 531:8.91027e-01 532:8.98949e-01 533:9.06875e-01 534:9.14965e-01 535:9.23386e-01 536:9.32232e-01 537:9.41286e-01 538:9.50238e-01 539:9.58765e-01 540:9.66532e-01 541:9.73250e-01 542:9.78842e-01 543:9.83287e-01 544:9.86572e-01 545:9.88689e-01 546:9.89706e-01 547:9.89985e-01 548:9.89962e-01 549:9.90073e-01 550:9.90750e-01 551:9.92283e-01 552:9.94384e-01 553:9.96622e-01 554:9.98565e-01 555:9.99778e-01 556:9.99944e-01 557:9.99220e-01 558:9.97879e-01 559:9.96193e-01 560:9.94430e-01 561:9.92783e-01 562:9.91158e-01 563:9.89392e-01 564:9.87329e-01 565:9.84813e-01 566:9.81725e-01 567:9.78071e-01 568:9.73886e-01 569:9.69203e-01 570:9.64055e-01 571:9.58441e-01 572:9.52238e-01 573:9.45297e-01 574:9.37477e-01 575:9.28649e-01 576:9.18795e-01 577:9.08301e-01 578:8.97635e-01 579:8.87240e-01 580:8.77536e-01 581:8.68792e-01 582:8.60747e-01 583:8.53023e-01 584:8.45253e-01 585:8.37084e-01 586:8.28241e-01 587:8.18732e-01 588:8.08635e-01 589:7.98030e-01 590:7.86995e-01 591:7.75604e-01 592:7.63900e-01 593:7.51916e-01 594:7.39683e-01 595:7.27231e-01 596:7.14588e-01 597:7.01793e-01 598:6.88887e-01 599:6.75910e-01 600:6.62904e-01 601:6.49891e-01 602:6.36841e-01 603:6.23709e-01 604:6.10454e-01 605:5.97037e-01 606:5.83440e-01 607:5.69704e-01 608:5.55889e-01 609:5.42047e-01 610:5.28230e-01 611:5.14475e-01 612:5.00788e-01 613:4.87169e-01 614:4.73616e-01 615:4.60131e-01 616:4.46726e-01 617:4.33459e-01 
618:4.20392e-01 619:4.07581e-01 620:3.95076e-01 621:3.82889e-01 622:3.70919e-01 623:3.59045e-01 624:3.47162e-01 625:3.35179e-01 626:3.23056e-01 627:3.10886e-01 628:2.98784e-01 629:2.86853e-01 630:2.75181e-01 631:2.63834e-01 632:2.52833e-01 633:2.42183e-01 634:2.31890e-01 635:2.21956e-01 636:2.12383e-01 637:2.03170e-01 638:1.94318e-01 639:1.85825e-01 640:1.77688e-01 641:1.69893e-01 642:1.62382e-01 643:1.55099e-01 644:1.47992e-01 645:1.41020e-01 646:1.34161e-01 647:1.27440e-01 648:1.20889e-01 649:1.14534e-01 650:1.08400e-01 651:1.02501e-01 652:9.68459e-02 653:9.14394e-02 654:8.62832e-02 655:8.13769e-02 656:7.67171e-02 657:7.22940e-02 658:6.80970e-02 659:6.41155e-02 660:6.03398e-02 661:5.67605e-02 662:5.33699e-02 663:5.01603e-02 664:4.71241e-02 665:4.42538e-02 666:4.15421e-02 667:3.89804e-02 668:3.65609e-02 669:3.42760e-02 670:3.21185e-02 671:3.00819e-02 672:2.81600e-02 673:2.63470e-02 674:2.46373e-02 675:2.30257e-02 676:2.15074e-02 677:2.00784e-02 678:1.87347e-02 679:1.74727e-02 680:1.62884e-02 681:1.51777e-02 682:1.41347e-02 683:1.31541e-02 684:1.22309e-02 685:1.13611e-02 686:1.05419e-02 687:9.77505e-03 688:9.06196e-03 689:8.40296e-03 690:7.79746e-03 691:7.24323e-03 692:6.73438e-03 693:6.26500e-03 694:5.83009e-03 695:5.42539e-03 696:5.04763e-03 697:4.69514e-03 698:4.36659e-03 699:4.06069e-03 700:3.77614e-03 701:3.51158e-03 702:3.26521e-03 703:3.03534e-03 704:2.82050e-03 705:2.61937e-03 706:2.43096e-03 707:2.25480e-03 708:2.09049e-03 709:1.93759e-03 710:1.79560e-03 711:1.66399e-03 712:1.54220e-03 713:1.42964e-03 714:1.32575e-03 715:1.22998e-03 716:1.14173e-03 717:1.06027e-03 718:9.84885e-04 719:9.14970e-04 720:8.49990e-04 721:7.89516e-04 722:7.33304e-04 723:6.81146e-04 724:6.32829e-04 725:5.88138e-04 726:5.46839e-04 727:5.08635e-04 728:4.73240e-04 729:4.40402e-04 730:4.09893e-04 731:3.81514e-04 732:3.55090e-04 733:3.30467e-04 734:3.07503e-04 735:2.86072e-04 736:2.66072e-04 737:2.47459e-04 738:2.30192e-04 739:2.14223e-04 740:1.99495e-04 741:1.85934e-04 742:1.73407e-04 743:1.61786e-04 744:1.50964e-04 745:1.40847e-04 746:1.31364e-04 747:1.22490e-04 748:1.14206e-04 749:1.06489e-04 750:9.93144e-05 751:9.26551e-05 752:8.64722e-05 753:8.07278e-05 754:7.53872e-05 755:7.04188e-05 756:6.57934e-05 757:6.14825e-05 758:5.74601e-05 759:5.37027e-05 760:5.01893e-05 761:4.69024e-05 762:4.38317e-05 763:4.09678e-05 764:3.83012e-05 765:3.58222e-05 766:3.35190e-05 767:3.13742e-05 768:2.93707e-05 769:2.74938e-05 770:2.57308e-05 771:2.40725e-05 772:2.25170e-05 773:2.10635e-05 774:1.97099e-05 775:1.84535e-05 776:1.72898e-05 777:1.62093e-05 778:1.52026e-05 779:1.42617e-05 780:1.33795e-05 781:1.25504e-05 782:1.17717e-05 783:1.10412e-05 784:1.03566e-05 785:9.71580e-06 786:9.11632e-06 787:8.55520e-06 788:8.02956e-06 789:7.53677e-06 790:7.07442e-06 791:6.64046e-06 792:6.23344e-06 793:5.85204e-06 794:5.49496e-06 795:5.16095e-06 796:4.84869e-06 797:4.55671e-06 798:4.28358e-06 799:4.02799e-06 800:3.78873e-06 801:3.56460e-06 802:3.35428e-06 803:3.15656e-06 804:2.97033e-06 805:2.79463e-06 806:2.62870e-06 807:2.47225e-06 808:2.32503e-06 809:2.18677e-06 810:2.05715e-06 811:1.93581e-06 812:1.82224e-06 813:1.71591e-06 814:1.61636e-06 815:1.52311e-06 816:1.43575e-06 817:1.35377e-06 818:1.27671e-06 819:1.20417e-06 820:1.13576e-06 821:1.07118e-06 822:1.01024e-06 823:9.52778e-07 824:8.98622e-07 825:8.47617e-07 826:7.99605e-07 827:7.54436e-07 828:7.11962e-07 829:6.72042e-07 830:6.34538e-07}}<line_sep>SDS_LEFS_PHOTOPIC=LazyCaseInsensitiveMapping({'CIE 1924 Photopic Standard Observer':partial(SpectralDistribution 
DATA_LEFS_PHOTOPIC['CIE 1924 Photopic Standard Observer'] name='CIE 1924 Photopic Standard Observer') 'Judd Modified CIE 1951 Photopic Standard Observer':partial(SpectralDistribution DATA_LEFS_PHOTOPIC['Judd Modified CIE 1951 Photopic Standard Observer'] name='Judd Modified CIE 1951 Photopic Standard Observer') 'Judd-Vos Modified CIE 1978 Photopic Standard Observer':partial(SpectralDistribution DATA_LEFS_PHOTOPIC['Judd-Vos Modified CIE 1978 Photopic Standard Observer'] name='Judd-Vos Modified CIE 1978 Photopic Standard Observer') 'CIE 1964 Photopic 10 Degree Standard Observer':partial(SpectralDistribution DATA_LEFS_PHOTOPIC['CIE 1964 Photopic 10 Degree Standard Observer'] name='CIE 1964 Photopic 10 Degree Standard Observer' strict_name='CIE 1964 Photopic 10$^\\circ$ Standard Observer') 'CIE 2008 2 Degree Physiologically Relevant LEF':partial(SpectralDistribution DATA_LEFS_PHOTOPIC['CIE 2008 2 Degree Physiologically Relevant LEF'] name='CIE 2008 2 Degree Physiologically Relevant LEF' strict_name='CIE 2008 2$^\\circ$ Physiologically Relevant LEF') 'CIE 2008 10 Degree Physiologically Relevant LEF':partial(SpectralDistribution DATA_LEFS_PHOTOPIC['CIE 2008 10 Degree Physiologically Relevant LEF'] name='CIE 2008 10 Degree Physiologically Relevant LEF' strict_name='CIE 2008 10$^\\circ$ Physiologically Relevant LEF')})<line_sep>SDS_LEFS_PHOTOPIC.__doc__=""" Spectral distributions of the photopic luminous efficiency functions. References ---------- :cite:`CVRLq`, :cite:`CVRLs` SDS_LEFS_PHOTOPIC : LazyCaseInsensitiveMapping **{'CIE 1924 Photopic Standard Observer', 'Judd Modified CIE 1951 Photopic Standard Observer', 'Judd-Vos Modified CIE 1978 Photopic Standard Observer', 'CIE 1964 Photopic 10 Degree Standard Observer', 'CIE 2008 2 Degree Physiologically Relevant LEF', 'CIE 2008 10 Degree Physiologically Relevant LEF'}** Aliases: - 'cie_2_1924': 'CIE 1924 Photopic Standard Observer' - 'cie_10_1964': 'CIE 1964 Photopic 10 Degree Standard Observer' """<line_sep>SDS_LEFS_PHOTOPIC['cie_2_1924']=(SDS_LEFS_PHOTOPIC['CIE 1924 Photopic Standard Observer'])<line_sep>SDS_LEFS_PHOTOPIC['cie_10_1964']=(SDS_LEFS_PHOTOPIC['CIE 1964 Photopic 10 Degree Standard Observer'])<line_sep>DATA_LEFS_SCOTOPIC={'CIE 1951 Scotopic Standard Observer':{380:0.0005890000 381:0.0006650000 382:0.0007520000 383:0.0008540000 384:0.0009720000 385:0.0011080000 386:0.0012680000 387:0.0014530000 388:0.0016680000 389:0.0019180000 390:0.0022090000 391:0.0025470000 392:0.0029390000 393:0.0033940000 394:0.0039210000 395:0.0045300000 396:0.0052400000 397:0.0060500000 398:0.0069800000 399:0.0080600000 400:0.0092900000 401:0.0107000000 402:0.0123100000 403:0.0141300000 404:0.0161900000 405:0.0185200000 406:0.0211300000 407:0.0240500000 408:0.0273000000 409:0.0308900000 410:0.0348400000 411:0.0391600000 412:0.0439000000 413:0.0490000000 414:0.0545000000 415:0.0604000000 416:0.0668000000 417:0.0736000000 418:0.0808000000 419:0.0885000000 420:0.0966000000 421:0.1052000000 422:0.1141000000 423:0.1235000000 424:0.1334000000 425:0.1436000000 426:0.1541000000 427:0.1651000000 428:0.1764000000 429:0.1879000000 430:0.1998000000 431:0.2119000000 432:0.2243000000 433:0.2369000000 434:0.2496000000 435:0.2625000000 436:0.2755000000 437:0.2886000000 438:0.3017000000 439:0.3149000000 440:0.3281000000 441:0.3412000000 442:0.3543000000 443:0.3673000000 444:0.3803000000 445:0.3931000000 446:0.4060000000 447:0.4180000000 448:0.4310000000 449:0.4430000000 450:0.4550000000 451:0.4670000000 452:0.4790000000 453:0.4900000000 454:0.5020000000 455:0.5130000000
456:0.5240000000 457:0.5350000000 458:0.5460000000 459:0.5570000000 460:0.5670000000 461:0.5780000000 462:0.5880000000 463:0.5990000000 464:0.6100000000 465:0.6200000000 466:0.6310000000 467:0.6420000000 468:0.6530000000 469:0.6640000000 470:0.6760000000 471:0.6870000000 472:0.6990000000 473:0.7100000000 474:0.7220000000 475:0.7340000000 476:0.7450000000 477:0.7570000000 478:0.7690000000 479:0.7810000000 480:0.7930000000 481:0.8050000000 482:0.8170000000 483:0.8280000000 484:0.8400000000 485:0.8510000000 486:0.8620000000 487:0.8730000000 488:0.8840000000 489:0.8940000000 490:0.9040000000 491:0.9140000000 492:0.9230000000 493:0.9320000000 494:0.9410000000 495:0.9490000000 496:0.9570000000 497:0.9640000000 498:0.9700000000 499:0.9760000000 500:0.9820000000 501:0.9860000000 502:0.9900000000 503:0.9940000000 504:0.9970000000 505:0.9980000000 506:1.0000000000 507:1.0000000000 508:1.0000000000 509:0.9980000000 510:0.9970000000 511:0.9940000000 512:0.9900000000 513:0.9860000000 514:0.9810000000 515:0.9750000000 516:0.9680000000 517:0.9610000000 518:0.9530000000 519:0.9440000000 520:0.9350000000 521:0.9250000000 522:0.9150000000 523:0.9040000000 524:0.8920000000 525:0.8800000000 526:0.8670000000 527:0.8540000000 528:0.8400000000 529:0.8260000000 530:0.8110000000 531:0.7960000000 532:0.7810000000 533:0.7650000000 534:0.7490000000 535:0.7330000000 536:0.7170000000 537:0.7000000000 538:0.6830000000 539:0.6670000000 540:0.6500000000 541:0.6330000000 542:0.6160000000 543:0.5990000000 544:0.5810000000 545:0.5640000000 546:0.5480000000 547:0.5310000000 548:0.5140000000 549:0.4970000000 550:0.4810000000 551:0.4650000000 552:0.4480000000 553:0.4330000000 554:0.4170000000 555:0.4020000000 556:0.3864000000 557:0.3715000000 558:0.3569000000 559:0.3427000000 560:0.3288000000 561:0.3151000000 562:0.3018000000 563:0.2888000000 564:0.2762000000 565:0.2639000000 566:0.2519000000 567:0.2403000000 568:0.2291000000 569:0.2182000000 570:0.2076000000 571:0.1974000000 572:0.1876000000 573:0.1782000000 574:0.1690000000 575:0.1602000000 576:0.1517000000 577:0.1436000000 578:0.1358000000 579:0.1284000000 580:0.1212000000 581:0.1143000000 582:0.1078000000 583:0.1015000000 584:0.0956000000 585:0.0899000000 586:0.0845000000 587:0.0793000000 588:0.0745000000 589:0.0699000000 590:0.0655000000 591:0.0613000000 592:0.0574000000 593:0.0537000000 594:0.0502000000 595:0.0469000000 596:0.0438000000 597:0.0409000000 598:0.0381600000 599:0.0355800000 600:0.0331500000 601:0.0308700000 602:0.0287400000 603:0.0267400000 604:0.0248700000 605:0.0231200000 606:0.0214700000 607:0.0199400000 608:0.0185100000 609:0.0171800000 610:0.0159300000 611:0.0147700000 612:0.0136900000 613:0.0126900000 614:0.0117500000 615:0.0108800000 616:0.0100700000 617:0.0093200000 618:0.0086200000 619:0.0079700000 620:0.0073700000 621:0.0068200000 622:0.0063000000 623:0.0058200000 624:0.0053800000 625:0.0049700000 626:0.0045900000 627:0.0042400000 628:0.0039130000 629:0.0036130000 630:0.0033350000 631:0.0030790000 632:0.0028420000 633:0.0026230000 634:0.0024210000 635:0.0022350000 636:0.0020620000 637:0.0019030000 638:0.0017570000 639:0.0016210000 640:0.0014970000 641:0.0013820000 642:0.0012760000 643:0.0011780000 644:0.0010880000 645:0.0010050000 646:0.0009280000 647:0.0008570000 648:0.0007920000 649:0.0007320000 650:0.0006770000 651:0.0006260000 652:0.0005790000 653:0.0005360000 654:0.0004960000 655:0.0004590000 656:0.0004250000 657:0.0003935000 658:0.0003645000 659:0.0003377000 660:0.0003129000 661:0.0002901000 662:0.0002689000 663:0.0002493000 664:0.0002313000 
665:0.0002146000 666:0.0001991000 667:0.0001848000 668:0.0001716000 669:0.0001593000 670:0.0001480000 671:0.0001375000 672:0.0001277000 673:0.0001187000 674:0.0001104000 675:0.0001026000 676:0.0000954000 677:0.0000888000 678:0.0000826000 679:0.0000769000 680:0.0000715000 681:0.0000666000 682:0.0000620000 683:0.0000578000 684:0.0000538000 685:0.0000501000 686:0.0000467000 687:0.0000436000 688:0.0000406000 689:0.0000378900 690:0.0000353300 691:0.0000329500 692:0.0000307500 693:0.0000287000 694:0.0000267900 695:0.0000250100 696:0.0000233600 697:0.0000218200 698:0.0000203800 699:0.0000190500 700:0.0000178000 701:0.0000166400 702:0.0000155600 703:0.0000145400 704:0.0000136000 705:0.0000127300 706:0.0000119100 707:0.0000111400 708:0.0000104300 709:0.0000097600 710:0.0000091400 711:0.0000085600 712:0.0000080200 713:0.0000075100 714:0.0000070400 715:0.0000066000 716:0.0000061800 717:0.0000058000 718:0.0000054400 719:0.0000051000 720:0.0000047800 721:0.0000044900 722:0.0000042100 723:0.0000039510 724:0.0000037090 725:0.0000034820 726:0.0000032700 727:0.0000030700 728:0.0000028840 729:0.0000027100 730:0.0000025460 731:0.0000023930 732:0.0000022500 733:0.0000021150 734:0.0000019890 735:0.0000018700 736:0.0000017590 737:0.0000016550 738:0.0000015570 739:0.0000014660 740:0.0000013790 741:0.0000012990 742:0.0000012230 743:0.0000011510 744:0.0000010840 745:0.0000010220 746:0.0000009620 747:0.0000009070 748:0.0000008550 749:0.0000008060 750:0.0000007600 751:0.0000007160 752:0.0000006750 753:0.0000006370 754:0.0000006010 755:0.0000005670 756:0.0000005350 757:0.0000005050 758:0.0000004770 759:0.0000004500 760:0.0000004250 761:0.0000004010 762:0.0000003790 763:0.0000003580 764:0.0000003382 765:0.0000003196 766:0.0000003021 767:0.0000002855 768:0.0000002699 769:0.0000002552 770:0.0000002413 771:0.0000002282 772:0.0000002159 773:0.0000002042 774:0.0000001932 775:0.0000001829 776:0.0000001731 777:0.0000001638 778:0.0000001551 779:0.0000001468 780:0.0000001390 }}<line_sep>SDS_LEFS_SCOTOPIC=LazyCaseInsensitiveMapping({'CIE 1951 Scotopic Standard Observer':partial(SpectralDistribution DATA_LEFS_SCOTOPIC['CIE 1951 Scotopic Standard Observer'] name='CIE 1951 Scotopic Standard Observer')})<line_sep>SDS_LEFS_SCOTOPIC.__doc__=""" Spectral distributions of the scotopic luminous efficiency functions. References ---------- :cite:`CVRLs` SDS_LEFS_SCOTOPIC : LazyCaseInsensitiveMapping **{'CIE 1951 Scotopic Standard Observer', }** Aliases: - 'cie_1951': 'CIE 1951 Scotopic Standard Observer' """<line_sep>SDS_LEFS_SCOTOPIC['cie_1951']=(SDS_LEFS_SCOTOPIC['CIE 1951 Scotopic Standard Observer'])<line_sep>SDS_LEFS=LazyCaseInsensitiveMapping(SDS_LEFS_PHOTOPIC)<line_sep>SDS_LEFS.__doc__=""" Spectral distributions of the luminous efficiency functions. 
References ---------- :cite:`CVRLq`, :cite:`CVRLs`, :cite:`Wikipedia2005d` SDS_LEFS : LazyCaseInsensitiveMapping **{'CIE 1924 Photopic Standard Observer', 'Judd Modified CIE 1951 Photopic Standard Observer', 'Judd-Vos Modified CIE 1978 Photopic Standard Observer', 'CIE 1964 Photopic 10 Degree Standard Observer', 'CIE 2008 2 Degree Physiologically Relevant LEF', 'CIE 2008 10 Degree Physiologically Relevant LEF', 'CIE 1951 Scotopic Standard Observer'}** """<line_sep>SDS_LEFS.update(SDS_LEFS_SCOTOPIC)<line_sep>DATA_MESOPIC_X={0.01:CaseInsensitiveMapping({'Blue Heavy':CaseInsensitiveMapping({'MOVE':0.13 'LRC':0.04}) 'Red Heavy':CaseInsensitiveMapping({'MOVE':0.00 'LRC':0.01})}) 0.1:CaseInsensitiveMapping({'Blue Heavy':CaseInsensitiveMapping({'MOVE':0.42 'LRC':0.28}) 'Red Heavy':CaseInsensitiveMapping({'MOVE':0.34 'LRC':0.11})}) 1.0:CaseInsensitiveMapping({'Blue Heavy':CaseInsensitiveMapping({'MOVE':0.70 'LRC':1.00}) 'Red Heavy':CaseInsensitiveMapping({'MOVE':0.68 'LRC':1.00})}) 10:CaseInsensitiveMapping({'Blue Heavy':CaseInsensitiveMapping({'MOVE':0.98 'LRC':1.00}) 'Red Heavy':CaseInsensitiveMapping({'MOVE':0.98 'LRC':1.00})})}<line_sep>""" Weighting factors for the mesopic luminous efficiency function calculation. DATA_MESOPIC_X : CaseInsensitiveMapping """<line_sep>
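The mesopic weighting factors above are only tabulated; nothing in this module applies them. As a rough illustration, a linear mesopic luminous efficiency function can be sketched as a convex combination of the photopic and scotopic curves, V_mes(wl) = x * V(wl) + (1 - x) * V'(wl), renormalised to a unit peak. The helper below is hypothetical (not part of the module), and real mesopic systems such as CIE MOVE solve for the weighting iteratively rather than with a single table lookup:

import numpy as np

def mesopic_lef_sketch(photopic, scotopic, x):
    # Blend the two curves over their common wavelength range and
    # renormalise so that the blended curve peaks at 1.
    wavelengths = sorted(set(photopic) & set(scotopic))
    blended = np.array(
        [x * photopic[wl] + (1 - x) * scotopic[wl] for wl in wavelengths])
    blended /= blended.max()
    return dict(zip(wavelengths, blended))

# Weighting factor for a "Blue Heavy" source at 0.1 cd/m^2 under the MOVE model.
x = DATA_MESOPIC_X[0.1]['Blue Heavy']['MOVE']  # 0.42
V_mesopic = mesopic_lef_sketch(
    DATA_LEFS_PHOTOPIC['CIE 1924 Photopic Standard Observer'],
    DATA_LEFS_SCOTOPIC['CIE 1951 Scotopic Standard Observer'], x)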
<import_stmt>unittest<import_stmt>shelve<import_stmt>glob<import_from_stmt>test support<import_from_stmt>collections.abc MutableMapping<import_from_stmt>test.test_dbm dbm_iterator<def_stmt>L1 s<block_start><return>s.decode("latin-1")<block_end><class_stmt>byteskeydict(MutableMapping)<block_start>"Mapping that supports bytes keys"<def_stmt>__init__ self<block_start>self.d={}<block_end><def_stmt>__getitem__ self key<block_start><return>self.d[L1(key)]<block_end><def_stmt>__setitem__ self key value<block_start>self.d[L1(key)]=value<block_end><def_stmt>__delitem__ self key<block_start><del_stmt>self.d[L1(key)]<block_end><def_stmt>__len__ self<block_start><return>len(self.d)<block_end><def_stmt>iterkeys self<block_start><for_stmt>k self.d.keys()<block_start><yield>k.encode("latin-1")<block_end><block_end>__iter__=iterkeys<def_stmt>keys self<block_start><return>list(self.iterkeys())<block_end><def_stmt>copy self<block_start><return>byteskeydict(self.d)<block_end><block_end><class_stmt>TestCase(unittest.TestCase)<block_start>fn="shelftemp.db"<def_stmt>tearDown self<block_start><for_stmt>f glob.glob(self.fn+"*")<block_start>support.unlink(f)<block_end><block_end><def_stmt>test_close self<block_start>d1={}<line_sep>s=shelve.Shelf(d1 protocol=2 writeback=<false>)<line_sep>s['key1']=[1 2 3 4]<line_sep>self.assertEqual(s['key1'] [1 2 3 4])<line_sep>self.assertEqual(len(s) 1)<line_sep>s.close()<line_sep>self.assertRaises(ValueError len s)<try_stmt><block_start>s['key1']<block_end><except_stmt>ValueError<block_start><pass><block_end><else_stmt><block_start>self.fail('Closed shelf should not find a key')<block_end><block_end><def_stmt>test_ascii_file_shelf self<block_start>s=shelve.open(self.fn protocol=0)<try_stmt><block_start>s['key1']=(1 2 3 4)<line_sep>self.assertEqual(s['key1'] (1 2 3 4))<block_end><finally_stmt><block_start>s.close()<block_end><block_end><def_stmt>test_binary_file_shelf self<block_start>s=shelve.open(self.fn protocol=1)<try_stmt><block_start>s['key1']=(1 2 3 4)<line_sep>self.assertEqual(s['key1'] (1 2 3 4))<block_end><finally_stmt><block_start>s.close()<block_end><block_end><def_stmt>test_proto2_file_shelf self<block_start>s=shelve.open(self.fn protocol=2)<try_stmt><block_start>s['key1']=(1 2 3 4)<line_sep>self.assertEqual(s['key1'] (1 2 3 4))<block_end><finally_stmt><block_start>s.close()<block_end><block_end><def_stmt>test_in_memory_shelf self<block_start>d1=byteskeydict()<line_sep>s=shelve.Shelf(d1 protocol=0)<line_sep>s['key1']=(1 2 3 4)<line_sep>self.assertEqual(s['key1'] (1 2 3 4))<line_sep>s.close()<line_sep>d2=byteskeydict()<line_sep>s=shelve.Shelf(d2 protocol=1)<line_sep>s['key1']=(1 2 3 4)<line_sep>self.assertEqual(s['key1'] (1 2 3 4))<line_sep>s.close()<line_sep>self.assertEqual(len(d1) 1)<line_sep>self.assertEqual(len(d2) 1)<line_sep>self.assertNotEqual(d1.items() d2.items())<block_end><def_stmt>test_mutable_entry self<block_start>d1=byteskeydict()<line_sep>s=shelve.Shelf(d1 protocol=2 writeback=<false>)<line_sep>s['key1']=[1 2 3 4]<line_sep>self.assertEqual(s['key1'] [1 2 3 4])<line_sep>s['key1'].append(5)<line_sep>self.assertEqual(s['key1'] [1 2 3 4])<line_sep>s.close()<line_sep>d2=byteskeydict()<line_sep>s=shelve.Shelf(d2 protocol=2 writeback=<true>)<line_sep>s['key1']=[1 2 3 4]<line_sep>self.assertEqual(s['key1'] [1 2 3 4])<line_sep>s['key1'].append(5)<line_sep>self.assertEqual(s['key1'] [1 2 3 4 5])<line_sep>s.close()<line_sep>self.assertEqual(len(d1) 1)<line_sep>self.assertEqual(len(d2) 1)<block_end><def_stmt>test_keyencoding 
self<block_start>d={}<line_sep>key='Pöp'<line_sep># the default keyencoding is utf-8 shelve.Shelf(d)[key]=[1]<line_sep>self.assertIn(key.encode('utf-8') d)<line_sep># but a different one can be given shelve.Shelf(d keyencoding='latin-1')[key]=[1]<line_sep>self.assertIn(key.encode('latin-1') d)<line_sep># with all consequences s=shelve.Shelf(d keyencoding='ascii')<line_sep>self.assertRaises(UnicodeEncodeError s.__setitem__ key [1])<block_end><def_stmt>test_writeback_also_writes_immediately self# Issue 5754 <block_start>d={}<line_sep>key='key'<line_sep>encodedkey=key.encode('utf-8')<line_sep>s=shelve.Shelf(d writeback=<true>)<line_sep>s[key]=[1]<line_sep>p1=d[encodedkey]# Will give a KeyError if backing store not updated s['key'].append(2)<line_sep>s.close()<line_sep>p2=d[encodedkey]<line_sep>self.assertNotEqual(p1 p2)<block_end><block_end># Write creates new object in store <import_from_stmt>test mapping_tests<class_stmt>TestShelveBase(mapping_tests.BasicTestMappingProtocol)<block_start>fn="shelftemp.db"<line_sep>counter=0<def_stmt>__init__ self *args **kw<block_start>self._db=[]<line_sep>mapping_tests.BasicTestMappingProtocol.__init__(self *args **kw)<block_end>type2test=shelve.Shelf<def_stmt>_reference self<block_start><return>{"key1":"value1" "key2":2 "key3":(1 2 3)}<block_end><def_stmt>_empty_mapping self<block_start><if_stmt>self._in_mem<block_start>x=shelve.Shelf(byteskeydict() **self._args)<block_end><else_stmt><block_start>self.counter<augadd>1<line_sep>x=shelve.open(self.fn+str(self.counter) **self._args)<block_end>self._db.append(x)<line_sep><return>x<block_end><def_stmt>tearDown self<block_start><for_stmt>db self._db<block_start>db.close()<block_end>self._db=[]<if_stmt><not>self._in_mem<block_start><for_stmt>f glob.glob(self.fn+"*")<block_start>support.unlink(f)<block_end><block_end><block_end><block_end><class_stmt>TestAsciiFileShelve(TestShelveBase)<block_start>_args={'protocol':0}<line_sep>_in_mem=<false><block_end><class_stmt>TestBinaryFileShelve(TestShelveBase)<block_start>_args={'protocol':1}<line_sep>_in_mem=<false><block_end><class_stmt>TestProto2FileShelve(TestShelveBase)<block_start>_args={'protocol':2}<line_sep>_in_mem=<false><block_end><class_stmt>TestAsciiMemShelve(TestShelveBase)<block_start>_args={'protocol':0}<line_sep>_in_mem=<true><block_end><class_stmt>TestBinaryMemShelve(TestShelveBase)<block_start>_args={'protocol':1}<line_sep>_in_mem=<true><block_end><class_stmt>TestProto2MemShelve(TestShelveBase)<block_start>_args={'protocol':2}<line_sep>_in_mem=<true><block_end><def_stmt>test_main <block_start><for_stmt>module dbm_iterator()<block_start>support.run_unittest(TestAsciiFileShelve TestBinaryFileShelve TestProto2FileShelve TestAsciiMemShelve TestBinaryMemShelve TestProto2MemShelve TestCase)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>test_main()<block_end>
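The writeback behaviour exercised by test_mutable_entry above is the detail of shelve that most often trips users up. A minimal standalone sketch of the same semantics (the shelf filename is arbitrary; the files are created in the working directory):

import shelve

# Without writeback, each s['key'] lookup unpickles a fresh copy, so
# in-place mutation of the fetched object is silently lost.
with shelve.open('demo_shelf') as s:
    s['key'] = [1, 2, 3]
    s['key'].append(4)     # mutates a temporary copy only
    print(s['key'])        # [1, 2, 3]

# With writeback=True the shelf caches live objects and writes them
# back on sync()/close(), so the mutation persists.
with shelve.open('demo_shelf', writeback=True) as s:
    s['key'].append(4)
    print(s['key'])        # [1, 2, 3, 4]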
<import_stmt>copy<import_stmt>sys<if_stmt>sys.version_info<l>(3 )<block_start>range=xrange<block_end><import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>scipy.stats<as>ss<import_from_stmt>patsy dmatrices dmatrix demo_data<import_from_stmt>.. families<as>fam<import_from_stmt>.. tsm<as>tsm<import_from_stmt>.. data_check<as>dc<import_from_stmt>.kalman *<class_stmt>DAR(tsm.TSM)<block_start>""" Inherits time series methods from TSM class. **** DYNAMIC AUTOREGRESSIVE MODEL **** Parameters ---------- data : pd.DataFrame Field to specify the time series data that will be used ar : int Number of autoregressive lags integ : int (default : 0) Specifies how many times to difference the time series target : str or int (default : None) Specifies which column name or array index to use; by default, the first column/array is selected as the dependent variable """<def_stmt>__init__ self data ar integ=0 target=<none># Initialize TSM object <block_start>super(DAR self).__init__('DAR')<line_sep># Latent Variable information self.ar=ar<line_sep>self.integ=integ<line_sep>self.target=target<line_sep>self.model_name="DAR("+str(self.ar)+", integrated="+str(self.integ)+")"<line_sep>self.max_lag=self.ar<line_sep>self._z_hide=0# Whether to cutoff latent variables from results table self.supported_methods=["MLE" "PML" "Laplace" "M-H" "BBVI"]<line_sep>self.default_method="MLE"<line_sep>self.multivariate_model=<false><line_sep># Format the data self.data_original=data.copy()<line_sep>self.data,self.data_name,self.is_pandas,self.index=dc.data_check(data target)<line_sep>self.data=self.data.astype(np.float64)# treat as float for Cython; np.float is removed in NumPy >= 1.24 self.data_original_nondf=self.data.copy()<line_sep># Difference data <for_stmt>order range(0 self.integ)<block_start>self.data=np.diff(self.data)<line_sep>self.data_name="Differenced "+self.data_name<block_end>self.X=self._ar_matrix()<line_sep>self.data=self.data[self.max_lag:]<line_sep>self.y=self.data<line_sep>self.y_name=self.data_name<line_sep>self._create_latent_variables()<line_sep>self.z_no=len(self.latent_variables.z_list)<block_end><def_stmt>_ar_matrix self<block_start>""" Creates Autoregressive matrix Returns ---------- X : np.ndarray Autoregressive Matrix """<line_sep>Y=np.array(self.data[self.max_lag:self.data.shape[0]])<line_sep>X=np.ones(Y.shape[0])<if_stmt>self.ar<ne>0<block_start><for_stmt>i range(0 self.ar)<block_start>X=np.vstack((X self.data[(self.max_lag-i-1):-i-1]))<block_end><block_end><return>X.T<block_end><def_stmt>_create_latent_variables self<block_start>""" Creates model latent variables Returns ---------- None (changes model attributes) """<line_sep>self.latent_variables.add_z('Sigma^2 irregular' fam.Flat(transform='exp') fam.Normal(0 3))<line_sep>self.latent_variables.add_z('Constant' fam.Flat(transform=<none>) fam.Normal(0 3))<for_stmt>parm range(1 self.ar+1)<block_start>self.latent_variables.add_z('Sigma^2 AR('+str(parm)+')' fam.Flat(transform='exp') fam.Normal(0 3))<block_end><block_end><def_stmt>_forecast_model self beta Z h<block_start>""" Creates forecasted states and variances Parameters ---------- beta : np.ndarray Contains untransformed starting values for latent variables Returns ---------- a : np.ndarray Forecasted states P : np.ndarray Variance of forecasted states """<line_sep>T,_,R,Q,H=self._ss_matrices(beta)<line_sep><return>dl_univariate_kalman_fcst(self.data Z H T Q R 0.0 h)<block_end><def_stmt>_model self data beta<block_start>""" Creates the structure of the model Parameters ---------- data : np.array Contains the time series beta : np.array Contains untransformed starting values for latent variables Returns ---------- a,P,K,F,v : np.array Filtered states, filtered variances, Kalman gains, F matrix, residuals
"""<line_sep>T,Z,R,Q,H=self._ss_matrices(beta)<line_sep><return>dl_univariate_kalman(data Z H T Q R 0.0)<block_end><def_stmt>_ss_matrices self beta<block_start>""" Creates the state space matrices required Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- T, Z, R, Q, H : np.array State space matrices used in KFS algorithm """<line_sep>T=np.identity(self.z_no-1)<line_sep>H=np.identity(1)<times>self.latent_variables.z_list[0].prior.transform(beta[0])<line_sep>Z=self.X<line_sep>R=np.identity(self.z_no-1)<line_sep>Q=np.identity(self.z_no-1)<for_stmt>i range(0 self.z_no-1)<block_start>Q[i][i]=self.latent_variables.z_list[i+1].prior.transform(beta[i+1])<block_end><return>T Z R Q H<block_end><def_stmt>neg_loglik self beta<block_start>""" Creates the negative log marginal likelihood of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- The negative log logliklihood of the model """<line_sep>_,_,_,F,v=self._model(self.y beta)<line_sep>loglik=0.0<for_stmt>i range(0 self.y.shape[0])<block_start>loglik<augadd>np.linalg.slogdet(F[: : i])[1]+np.dot(v[i] np.dot(np.linalg.pinv(F[: : i]) v[i]))<block_end><return>-(-((self.y.shape[0]/2)<times>np.log(2<times>np.pi))-0.5<times>loglik.T[0].sum())<block_end><def_stmt>plot_predict self h=5 past_values=20 intervals=<true> **kwargs<block_start>""" Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? past_values : int (default : 20) How many past observations to show on the forecast graph? intervals : Boolean Would you like to show 95% prediction intervals for the forecast? Returns ---------- - Plot of the forecast """<import_stmt>matplotlib.pyplot<as>plt<import_stmt>seaborn<as>sns<line_sep>figsize=kwargs.get('figsize' (10 7))<if_stmt>self.latent_variables.estimated<is><false><block_start><raise>Exception("No latent variables estimated!")<block_end><else_stmt><block_start>y_holder=self.y.copy()# holds past data and predicted data to create AR matrix full_X=self.X.copy()<line_sep>full_X=np.append(full_X np.array([np.append(1.0 y_holder[-self.ar:][::-1])]) axis=0)<line_sep>Z=full_X<line_sep># Construct Z matrix <for_stmt>step range(h)<block_start>a,P=self._forecast_model(self.latent_variables.get_z_values() Z step)<line_sep>new_value=np.dot(Z[-1 :] a[: self.y.shape[0]+step])<line_sep>y_holder=np.append(y_holder new_value)<line_sep>Z=np.append(Z np.array([np.append(1.0 y_holder[-self.ar:][::-1])]) axis=0)<block_end># Retrieve data, dates and (transformed) latent variables a,P=self._forecast_model(self.latent_variables.get_z_values() Z h)<line_sep>smoothed_series=np.zeros(self.y.shape[0]+h)<line_sep>series_variance=np.zeros(self.y.shape[0]+h)<for_stmt>t range(self.y.shape[0]+h)<block_start>smoothed_series[t]=np.dot(Z[t] a[: t])<line_sep>series_variance[t]=np.dot(np.dot(Z[t] P[: : t]) Z[t].T)+self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0])<block_end>date_index=self.shift_dates(h)<line_sep>plot_values=smoothed_series[-h-past_values:]<line_sep>forecasted_values=smoothed_series[-h:]<line_sep>lower=forecasted_values-1.98<times>np.power(series_variance[-h:] 0.5)<line_sep>upper=forecasted_values+1.98<times>np.power(series_variance[-h:] 0.5)<line_sep>lower=np.append(plot_values[-h-1] lower)<line_sep>upper=np.append(plot_values[-h-1] 
upper)<line_sep>plot_index=date_index[-h-past_values:]<line_sep>plt.figure(figsize=figsize)<if_stmt>intervals<eq><true><block_start>plt.fill_between(date_index[-h-1:] lower upper alpha=0.2)<block_end>plt.plot(plot_index plot_values)<line_sep>plt.title("Forecast for "+self.y_name)<line_sep>plt.xlabel("Time")<line_sep>plt.ylabel(self.y_name)<line_sep>plt.show()<block_end><block_end><def_stmt>plot_fit self intervals=<false> **kwargs<block_start>""" Plots the fit of the model Parameters ---------- intervals : Boolean Whether to plot 95% confidence interval of states Returns ---------- None (plots data and the fit) """<import_stmt>matplotlib.pyplot<as>plt<import_stmt>seaborn<as>sns<line_sep>figsize=kwargs.get('figsize' (10 7))<line_sep>series_type=kwargs.get('series_type' 'Smoothed')<if_stmt>self.latent_variables.estimated<is><false><block_start><raise>Exception("No latent variables estimated!")<block_end><else_stmt><block_start>date_index=copy.deepcopy(self.index)<line_sep>date_index=date_index[self.integ+self.ar:]<if_stmt>series_type<eq>'Smoothed'<block_start>mu,V=self.smoothed_state(self.data self.latent_variables.get_z_values())<block_end><elif_stmt>series_type<eq>'Filtered'<block_start>mu,V,_,_,_=self._model(self.data self.latent_variables.get_z_values())<block_end><else_stmt><block_start>mu,V=self.smoothed_state(self.data self.latent_variables.get_z_values())<block_end># Create smoothed/filtered aggregate series _,Z,_,_,_=self._ss_matrices(self.latent_variables.get_z_values())<line_sep>smoothed_series=np.zeros(self.y.shape[0])<for_stmt>t range(0 self.y.shape[0])<block_start>smoothed_series[t]=np.dot(Z[t] mu[: t])<block_end>plt.figure(figsize=figsize)<line_sep>plt.subplot(self.z_no+1 1 1)<line_sep>plt.title(self.y_name+" Raw and "+series_type)<line_sep>plt.plot(date_index self.data label='Data')<line_sep>plt.plot(date_index smoothed_series label=series_type c='black')<line_sep>plt.legend(loc=2)<for_stmt>coef range(0 self.z_no-1)<block_start>V_coef=V[0][coef][:-1]<line_sep>plt.subplot(self.z_no+1 1 2+coef)<line_sep>plt.title("Beta "+self.latent_variables.z_list[1+coef].name)<if_stmt>intervals<eq><true><block_start>alpha=[0.15<times>i/float(100)<for>i range(50 12 -2)]<line_sep>plt.fill_between(date_index[5:] mu[coef 0:mu.shape[1]-1][5:]+1.98<times>np.sqrt(V_coef[5:]) mu[coef 0:mu.shape[1]-1][5:]-1.98<times>np.sqrt(V_coef[5:]) alpha=0.15 label='95% C.I.')<block_end>plt.plot(date_index mu[coef 0:mu.shape[1]-1] label='Data')<line_sep>plt.legend(loc=2)<block_end>plt.subplot(self.z_no+1 1 self.z_no+1)<line_sep>plt.title("Measurement Error")<line_sep>plt.plot(date_index self.data-smoothed_series label='Irregular')<line_sep>plt.legend(loc=2)<line_sep>plt.show()<block_end><block_end><def_stmt>predict self h=5<block_start>""" Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? 
Returns ---------- - pd.DataFrame with predictions """<if_stmt>self.latent_variables.estimated<is><false><block_start><raise>Exception("No latent variables estimated!")<block_end><else_stmt><block_start>y_holder=self.y.copy()# holds past data and predicted data to create AR matrix full_X=self.X.copy()<line_sep>full_X=np.append(full_X np.array([np.append(1.0 y_holder[-self.ar:][::-1])]) axis=0)<line_sep>Z=full_X<for_stmt>step range(h)<block_start>a,P=self._forecast_model(self.latent_variables.get_z_values() Z step)<line_sep>new_value=np.dot(Z[-1 :] a[: self.y.shape[0]+step])<line_sep>y_holder=np.append(y_holder new_value)<line_sep>Z=np.append(Z np.array([np.append(1.0 y_holder[-self.ar:][::-1])]) axis=0)<block_end>date_index=self.shift_dates(h)<line_sep>result=pd.DataFrame(y_holder[-h:])<line_sep>result.rename(columns={0:self.y_name} inplace=<true>)<line_sep>result.index=date_index[-h:]<line_sep><return>result<block_end><block_end><def_stmt>predict_is self h=5 fit_once=<true><block_start>""" Makes dynamic in-sample predictions with the estimated model Parameters ---------- h : int (default : 5) How many steps would you like to forecast? fit_once : boolean (default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint Returns ---------- - pd.DataFrame with predicted values """<line_sep>predictions=[]<for_stmt>t range(0 h)<block_start>data1=self.data_original_nondf[:-h+t]<line_sep>x=DAR(data=data1 ar=self.ar integ=self.integ)<if_stmt>fit_once<is><false><block_start>x.fit(printer=<false>)<block_end><if_stmt>t<eq>0<block_start><if_stmt>fit_once<is><true><block_start>x.fit(printer=<false>)<line_sep>saved_lvs=x.latent_variables<block_end>predictions=x.predict(1)<block_end><else_stmt><block_start><if_stmt>fit_once<is><true><block_start>x.latent_variables=saved_lvs<block_end>predictions=pd.concat([predictions x.predict(1)])<block_end><block_end>predictions.rename(columns={0:self.y_name} inplace=<true>)<line_sep>predictions.index=self.index[-h:]<line_sep><return>predictions<block_end><def_stmt>plot_predict_is self h=5 **kwargs<block_start>""" Plots forecasts with the estimated model against data (Simulated prediction with data) Parameters ---------- h : int (default : 5) How many steps to forecast Returns ---------- - Plot of the forecast against data """<import_stmt>matplotlib.pyplot<as>plt<import_stmt>seaborn<as>sns<line_sep>figsize=kwargs.get('figsize' (10 7))<line_sep>plt.figure(figsize=figsize)<line_sep>predictions=self.predict_is(h)<line_sep>data=self.data[-h:]<line_sep>plt.plot(predictions.index data label='Data')<line_sep>plt.plot(predictions.index predictions label='Predictions' c='black')<line_sep>plt.title(self.y_name)<line_sep>plt.legend(loc=2)<line_sep>plt.show()<block_end><def_stmt>simulation_smoother self beta<block_start>""" Koopman's simulation smoother - simulates from states given model parameters and observations Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- - A simulated state evolution """<line_sep>T,Z,R,Q,H=self._ss_matrices(beta)<line_sep># Generate e_t+ and n_t+ rnd_h=np.random.normal(0 np.sqrt(H) self.data.shape[0]+1)<line_sep>q_dist=ss.multivariate_normal([0.0 0.0] Q)<line_sep>rnd_q=q_dist.rvs(self.data.shape[0]+1)<line_sep># Generate a_t+ and y_t+ a_plus=np.zeros((T.shape[0] self.data.shape[0]+1))<line_sep>a_plus[0 0]=np.mean(self.data[0:5])<line_sep>y_plus=np.zeros(self.data.shape[0])<for_stmt>t range(0 
self.data.shape[0]+1)<block_start><if_stmt>t<eq>0<block_start>a_plus[: t]=np.dot(T a_plus[: t])+rnd_q[t :]<line_sep>y_plus[t]=np.dot(Z a_plus[: t])+rnd_h[t]<block_end><else_stmt><block_start><if_stmt>t<ne>self.data.shape[0]<block_start>a_plus[: t]=np.dot(T a_plus[: t-1])+rnd_q[t :]<line_sep>y_plus[t]=np.dot(Z a_plus[: t])+rnd_h[t]<block_end><block_end><block_end>alpha_hat,_=self.smoothed_state(self.data beta)<line_sep>alpha_hat_plus,_=self.smoothed_state(y_plus beta)<line_sep>alpha_tilde=alpha_hat-alpha_hat_plus+a_plus<line_sep><return>alpha_tilde<block_end><def_stmt>smoothed_state self data beta<block_start>""" Returns the smoothed states of the model, obtained via a Kalman filter and smoother Parameters ---------- data : np.array Data to be smoothed beta : np.array Contains untransformed starting values for latent variables Returns ---------- - Smoothed states and their variances """<line_sep>T,Z,R,Q,H=self._ss_matrices(beta)<line_sep>alpha,V=dl_univariate_KFS(data Z H T Q R 0.0)<line_sep><return>alpha V<block_end><block_end>
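A minimal usage sketch for the dynamic autoregression model above. The constructor signature and `fit(printer=False)` are taken from the `predict_is` method shown earlier; the data, lag order, and the exact return types are assumptions.

import numpy as np

# Hypothetical usage of the DAR class defined above (API inferred from predict_is/fit calls).
np.random.seed(0)
y = np.cumsum(np.random.randn(200))   # a random-walk series to model

model = DAR(data=y, ar=2, integ=0)    # two autoregressive lags, no differencing
model.fit(printer=False)              # estimate the latent variables
print(model.predict(h=5))             # 5-step-ahead forecast as a pd.DataFrame
print(model.predict_is(h=10))         # dynamic in-sample one-step predictions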
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals<import_from_stmt>django.http response<import_from_stmt>django.urls reverse<import_from_stmt>django.urls.resolvers get_ns_resolver<import_from_stmt>.base WeChatTestCase<class_stmt>UtilDecoratorTestCase(WeChatTestCase)<block_start><pass><block_end>
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['TriggerTriggerArgs' ]<line_sep>@pulumi.input_type<class_stmt>TriggerTriggerArgs<block_start><def_stmt>__init__ __self__ * destination_arn:pulumi.Input[str] events:pulumi.Input[Sequence[pulumi.Input[str]]] name:pulumi.Input[str] branches:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> custom_data:Optional[pulumi.Input[str]]=<none><block_start>""" :param pulumi.Input[str] destination_arn: The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon Simple Notification Service (SNS). :param pulumi.Input[Sequence[pulumi.Input[str]]] events: The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon Simple Notification Service (SNS). If no events are specified, the trigger will run for all repository events. Event types include: `all`, `updateReference`, `createReference`, `deleteReference`. :param pulumi.Input[str] name: The name of the trigger. :param pulumi.Input[Sequence[pulumi.Input[str]]] branches: The branches that will be included in the trigger configuration. If no branches are specified, the trigger will apply to all branches. :param pulumi.Input[str] custom_data: Any custom data associated with the trigger that will be included in the information sent to the target of the trigger. """<line_sep>pulumi.set(__self__ "destination_arn" destination_arn)<line_sep>pulumi.set(__self__ "events" events)<line_sep>pulumi.set(__self__ "name" name)<if_stmt>branches<is><not><none><block_start>pulumi.set(__self__ "branches" branches)<block_end><if_stmt>custom_data<is><not><none><block_start>pulumi.set(__self__ "custom_data" custom_data)<block_end><block_end>@[email protected](name="destinationArn")<def_stmt>destination_arn self<arrow>pulumi.Input[str]<block_start>""" The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon Simple Notification Service (SNS). """<line_sep><return>pulumi.get(self "destination_arn")<block_end>@destination_arn.setter<def_stmt>destination_arn self value:pulumi.Input[str]<block_start>pulumi.set(self "destination_arn" value)<block_end>@[email protected]<def_stmt>events self<arrow>pulumi.Input[Sequence[pulumi.Input[str]]]<block_start>""" The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon Simple Notification Service (SNS). If no events are specified, the trigger will run for all repository events. Event types include: `all`, `updateReference`, `createReference`, `deleteReference`. """<line_sep><return>pulumi.get(self "events")<block_end>@events.setter<def_stmt>events self value:pulumi.Input[Sequence[pulumi.Input[str]]]<block_start>pulumi.set(self "events" value)<block_end>@[email protected]<def_stmt>name self<arrow>pulumi.Input[str]<block_start>""" The name of the trigger. 
"""<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:pulumi.Input[str]<block_start>pulumi.set(self "name" value)<block_end>@[email protected]<def_stmt>branches self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>""" The branches that will be included in the trigger configuration. If no branches are specified, the trigger will apply to all branches. """<line_sep><return>pulumi.get(self "branches")<block_end>@branches.setter<def_stmt>branches self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "branches" value)<block_end>@[email protected](name="customData")<def_stmt>custom_data self<arrow>Optional[pulumi.Input[str]]<block_start>""" Any custom data associated with the trigger that will be included in the information sent to the target of the trigger. """<line_sep><return>pulumi.get(self "custom_data")<block_end>@custom_data.setter<def_stmt>custom_data self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "custom_data" value)<block_end><block_end>
<import_from_future_stmt> print_function<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># process=cms.Process("BeamSpotDipServer")<line_sep>process.load("DQMServices.Core.DQM_cfg")<line_sep># message logger process.load("FWCore.MessageLogger.MessageLogger_cfi")<line_sep>process.MessageLogger.cerr=cms.untracked.PSet(threshold=cms.untracked.string('INFO') default=cms.untracked.PSet(limit=cms.untracked.int32(1000)) BeamSpotDipServer=cms.untracked.PSet(limit=cms.untracked.int32(1000)))<line_sep># source process.source=cms.Source("PoolSource" fileNames=cms.untracked.vstring('file:/tmp/sikler/b.root'# lxplus7101 ))<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(100))<line_sep># beamspot from database process.load("CondCore.CondDB.CondDB_cfi")<line_sep>process.load("DQM.Integration.config.FrontierCondition_GT_cfi")<line_sep>process.GlobalTag.toGet=cms.VPSet(cms.PSet(record=cms.string("BeamSpotOnlineLegacyObjectsRcd") refreshTime=cms.uint64(1)) )<line_sep># module process.load("DQM.BeamMonitor.BeamSpotDipServer_cff")<line_sep>process.beamSpotDipServer.verbose=<true><line_sep>process.beamSpotDipServer.testing=<true><line_sep>process.beamSpotDipServer.readFromNFS=<true><line_sep>process.beamSpotDipServer.sourceFile="../../../../../BeamFitResults.txt"<line_sep>process.beamSpotDipServer.sourceFile1="../../../../../TkStatus.txt"<line_sep># process customizations <import_from_stmt>DQM.Integration.config.online_customizations_cfi *<line_sep>process=customise(process)<line_sep># path process.p=cms.Path(process.beamSpotDipServer)<line_sep>
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocessing utils for pretraining, mostly taken from: https://github.com/dmlc/gluon-nlp/tree/v0.9.x/scripts/bert/data but modified to work with Bort. """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>argparse<import_stmt>logging<import_stmt>io<import_stmt>os<import_stmt>glob<import_stmt>collections<import_stmt>random<import_stmt>time<import_stmt>numpy<as>np<import_stmt>gluonnlp<as>nlp<import_from_stmt>multiprocessing Pool<class_stmt>TrainingInstance(object)<block_start>"""A single training instance (sentence pair)."""<def_stmt>__init__ self tokens segment_ids masked_lm_positions masked_lm_labels is_random_next vocab<block_start>self.tokens=tokens<line_sep>self.segment_ids=segment_ids<line_sep>self.is_random_next=is_random_next<line_sep>self.masked_lm_positions=masked_lm_positions<line_sep>self.masked_lm_labels=masked_lm_labels<line_sep>self.vocab=vocab<block_end><def_stmt>__str__ self<block_start>tks=self.vocab.to_tokens(self.tokens)<line_sep>mask_tks=self.vocab.to_tokens(self.masked_lm_labels)<line_sep>s=''<line_sep>s<augadd>'tokens: %s\n'%(' '.join(tks))<line_sep>s<augadd>'segment_ids: %s\n'%(' '.join([str(x)<for>x self.segment_ids]))<line_sep>s<augadd>'is_random_next: %s\n'%self.is_random_next<line_sep>s<augadd>'masked_lm_positions: %s\n'%(' '.join([str(x)<for>x self.masked_lm_positions]))<line_sep>s<augadd>'masked_lm_labels: %s\n'%(' '.join(mask_tks))<line_sep>s<augadd>'\n'<line_sep><return>s<block_end><def_stmt>__repr__ self<block_start><return>self.__str__()<block_end><block_end><def_stmt>transform instance max_seq_length<block_start>"""Transform instance to inputs for MLM and NSP."""<line_sep>input_ids=instance.tokens<assert_stmt>len(input_ids)<le>max_seq_length<line_sep>segment_ids=instance.segment_ids<line_sep>masked_lm_positions=instance.masked_lm_positions<line_sep>valid_lengths=len(input_ids)<line_sep>masked_lm_ids=instance.masked_lm_labels<line_sep>masked_lm_weights=[1.0]<times>len(masked_lm_ids)<line_sep>next_sentence_label=1<if>instance.is_random_next<else>0<line_sep>features={}<line_sep>features['input_ids']=input_ids<line_sep>features['segment_ids']=segment_ids<line_sep>features['masked_lm_positions']=masked_lm_positions<line_sep>features['masked_lm_ids']=masked_lm_ids<line_sep>features['masked_lm_weights']=masked_lm_weights<line_sep>features['next_sentence_labels']=[next_sentence_label]<line_sep>features['valid_lengths']=[valid_lengths]<line_sep><return>features<block_end><def_stmt>print_example instance features<block_start>logging.debug('*** Example Instance ***')<line_sep>logging.debug('\n%s' instance)<for_stmt>feature_name features.keys()<block_start>feature=features[feature_name]<line_sep>logging.debug('Generated %s: %s' feature_name feature)<block_end><block_end><def_stmt>write_to_files_np features tokenizer max_seq_length max_predictions_per_seq output_files# pylint: 
disable=unused-argument <block_start>"""Write to numpy files from `TrainingInstance`s."""<line_sep>next_sentence_labels=[]<line_sep>valid_lengths=[]<assert_stmt>len(output_files)<eq>1 'numpy format only supports a single output file'<line_sep>output_file=output_files[0]<line_sep>(input_ids segment_ids masked_lm_positions masked_lm_ids masked_lm_weights next_sentence_labels valid_lengths)=features<line_sep>total_written=len(next_sentence_labels)<line_sep># store variable-length numpy arrays directly as object arrays. outputs=collections.OrderedDict()<line_sep>outputs['input_ids']=np.array(input_ids dtype=object)<line_sep>outputs['segment_ids']=np.array(segment_ids dtype=object)<line_sep>outputs['masked_lm_positions']=np.array(masked_lm_positions dtype=object)<line_sep>outputs['masked_lm_ids']=np.array(masked_lm_ids dtype=object)<line_sep>outputs['masked_lm_weights']=np.array(masked_lm_weights dtype=object)<line_sep>outputs['next_sentence_labels']=np.array(next_sentence_labels dtype='int32')<line_sep>outputs['valid_lengths']=np.array(valid_lengths dtype='int32')<try_stmt><block_start>np.savez_compressed(output_file **outputs)<block_end><except_stmt>RuntimeError<as>e<block_start>logging.error(f"Runtime error: {e}, attempting to save half the data")<line_sep>halfway=len(outputs['input_ids'])<floordiv>2<line_sep>output1={k:v[:halfway]<for>k,v outputs.items()}<line_sep>np.savez_compressed(f"{output_file}_1.npz" **output1)<line_sep>output2={k:v[halfway:]<for>k,v outputs.items()}<line_sep>np.savez_compressed(f"{output_file}_2.npz" **output2)<block_end>logging.info('Wrote %d total instances' total_written)<block_end><def_stmt>tokenize_lines_fn x<block_start>"""Worker function to tokenize lines based on the tokenizer, and perform vocabulary lookup."""<line_sep>lines,tokenizer,vocab=x<line_sep>results=[]<for_stmt>line lines<block_start><if_stmt><not>line<block_start><break><block_end>line=line.strip()<line_sep># Empty lines are used as document delimiters <if_stmt><not>line<block_start>results.append([])<block_end><else_stmt><block_start>tokens=vocab[[vocab.bos_token]+vocab[tokenizer(line)]+[vocab.eos_token]]<if_stmt>tokens<block_start>results.append(tokens)<block_end><block_end><block_end><return>results<block_end><def_stmt>convert_to_npz instances max_seq_length<block_start>"""Create masked language model and next sentence prediction samples as numpy arrays."""<line_sep>input_ids=[]<line_sep>segment_ids=[]<line_sep>masked_lm_positions=[]<line_sep>masked_lm_ids=[]<line_sep>masked_lm_weights=[]<line_sep>next_sentence_labels=[]<line_sep>valid_lengths=[]<for_stmt>inst_index,instance enumerate(instances)<block_start>features=transform(instance max_seq_length)<line_sep>input_id=features['input_ids']<line_sep>segment_id=features['segment_ids']<line_sep>masked_lm_position=features['masked_lm_positions']<line_sep>masked_lm_id=features['masked_lm_ids']<line_sep>masked_lm_weight=features['masked_lm_weights']<line_sep>next_sentence_label=features['next_sentence_labels'][0]<line_sep>valid_length=features['valid_lengths'][0]<line_sep>input_ids.append(np.ascontiguousarray(input_id dtype='int32'))<line_sep>segment_ids.append(np.ascontiguousarray(segment_id dtype='int32'))<line_sep>masked_lm_positions.append(np.ascontiguousarray(masked_lm_position dtype='int32'))<line_sep>masked_lm_ids.append(np.ascontiguousarray(masked_lm_id dtype='int32'))<line_sep>masked_lm_weights.append(np.ascontiguousarray(masked_lm_weight 
dtype='float32'))<line_sep>next_sentence_labels.append(next_sentence_label)<line_sep>valid_lengths.append(valid_length)<line_sep># debugging information <if_stmt>inst_index<l>1<block_start>print_example(instance features)<block_end><block_end><return>input_ids masked_lm_ids masked_lm_positions masked_lm_weights next_sentence_labels segment_ids valid_lengths<block_end><def_stmt>create_training_instances x<block_start>"""Create `TrainingInstance`s from raw text. The expected input file format is the following: (1) One sentence per line. These should ideally be actual sentences, not entire paragraphs or arbitrary spans of text. (Because we use the sentence boundaries for the "next sentence prediction" task). (2) Blank lines between documents. Document boundaries are needed so that the "next sentence prediction" task doesn't span between documents. The function expects arguments packed in a tuple as described below. Parameters ---------- input_files : list of str List of paths to input text files. tokenizer : Tokenizer The tokenizer max_seq_length : int The hard limit of maximum sequence length of sentence pairs dupe_factor : int Duplication factor. short_seq_prob : float The probability of sampling sequences shorter than the max_seq_length. masked_lm_prob : float The probability of replacing texts with masks/random words/original words. max_predictions_per_seq : int The hard limit of the number of predictions for masked words whole_word_mask : bool Whether to do masking for whole words vocab : Vocab The vocab for the model nworker : int The number of processes to help processing texts in parallel worker_pool : multiprocessing.Pool Must be provided if nworker > 1. The caller is responsible for the destruction of the worker pool. output_file : str or None Path to the output file. If None, the result is not serialized. If provided, results are stored in the order of (input_ids, segment_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights, next_sentence_labels, valid_lengths). 
Returns ------- A tuple of np.ndarray : input_ids, masked_lm_ids, masked_lm_positions, masked_lm_weights next_sentence_labels, segment_ids, valid_lengths """<line_sep>(input_files tokenizer max_seq_length short_seq_prob masked_lm_prob max_predictions_per_seq whole_word_mask vocab dupe_factor nworker worker_pool output_file)=x<line_sep>time_start=time.time()<if_stmt>nworker<g>1<block_start><assert_stmt>worker_pool<is><not><none><block_end>all_documents=[[]]<for_stmt>input_file input_files<block_start><with_stmt>io.open(input_file 'r' encoding='utf-8')<as>reader<block_start>lines=reader.readlines()<line_sep>num_lines=len(lines)<line_sep>num_lines_per_worker=(num_lines+nworker-1)<floordiv>nworker<line_sep>process_args=[]<line_sep># tokenize in parallel <for_stmt>worker_idx range(nworker)<block_start>start=worker_idx<times>num_lines_per_worker<line_sep>end=min((worker_idx+1)<times>num_lines_per_worker num_lines)<line_sep>process_args.append((lines[start:end] tokenizer vocab))<block_end><if_stmt>worker_pool<block_start>tokenized_results=worker_pool.map(tokenize_lines_fn process_args)<block_end><else_stmt><block_start>tokenized_results=[tokenize_lines_fn(process_args[0])]<block_end><for_stmt>tokenized_result tokenized_results<block_start><for_stmt>line tokenized_result<block_start><if_stmt><not>line<block_start><if_stmt>all_documents[-1]<block_start>all_documents.append([])<block_end><block_end><else_stmt><block_start>all_documents[-1].append(line)<block_end><block_end><block_end><block_end><block_end># remove the last empty document if any <if_stmt><not>all_documents[-1]<block_start>all_documents=all_documents[:-1]<block_end># generate training instances instances=[]<if_stmt>worker_pool<block_start>process_args=[]<for_stmt>document_index range(len(all_documents))<block_start>process_args.append((all_documents document_index max_seq_length short_seq_prob masked_lm_prob max_predictions_per_seq whole_word_mask vocab tokenizer))<block_end><for_stmt>_ range(dupe_factor)<block_start>instances_results=worker_pool.map(create_instances_from_document process_args)<for_stmt>instances_result instances_results<block_start>instances.extend(instances_result)<block_end><block_end>npz_instances=worker_pool.apply(convert_to_npz (instances max_seq_length))<block_end><else_stmt><block_start><for_stmt>_ range(dupe_factor)<block_start><for_stmt>document_index range(len(all_documents))<block_start>instances.extend(create_instances_from_document((all_documents document_index max_seq_length short_seq_prob masked_lm_prob max_predictions_per_seq whole_word_mask vocab tokenizer)))<block_end><block_end>npz_instances=convert_to_npz(instances max_seq_length)<block_end>(input_ids masked_lm_ids masked_lm_positions masked_lm_weights next_sentence_labels segment_ids valid_lengths)=npz_instances<line_sep># write output to files. 
Used when pre-generating files <if_stmt>output_file<block_start>features=(input_ids segment_ids masked_lm_positions masked_lm_ids masked_lm_weights next_sentence_labels valid_lengths)<line_sep>logging.debug('*** Writing to output file %s ***' output_file)<line_sep>write_to_files_np(features tokenizer max_seq_length max_predictions_per_seq [output_file])<line_sep>features=<none><block_end><else_stmt><block_start>features=(input_ids masked_lm_ids masked_lm_positions masked_lm_weights next_sentence_labels segment_ids valid_lengths)<block_end>time_end=time.time()<line_sep>logging.debug('Process %d files took %.1f s' len(input_files) time_end-time_start)<line_sep><return>features<block_end><def_stmt>create_instances_from_document x<block_start>"""Creates `TrainingInstance`s for a single document."""<line_sep>(all_documents document_index max_seq_length short_seq_prob masked_lm_prob max_predictions_per_seq whole_word_mask vocab tokenizer)=x<line_sep>document=all_documents[document_index]<line_sep>_MASK_TOKEN=vocab[vocab.mask_token]<line_sep>_CLS_TOKEN=vocab[vocab.cls_token]<line_sep>_SEP_TOKEN=vocab[vocab.sep_token]<line_sep># Account for [CLS], [SEP], [SEP] max_num_tokens=max_seq_length-3<line_sep># According to the original tensorflow implementation: # We *usually* want to fill up the entire sequence since we are padding # to `max_seq_length` anyways, so short sequences are generally wasted # computation. However, we *sometimes* # (i.e., short_seq_prob == 0.1, 10% of the time) want to use shorter # sequences to minimize the mismatch between pre-training and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `max_seq_length` is a hard limit. target_seq_length=max_num_tokens<if_stmt>random.random()<l>short_seq_prob<block_start>target_seq_length=random.randint(2 max_num_tokens)<block_end># We DON'T just concatenate all of the tokens from a document into a long # sequence and choose an arbitrary split point because this would make the # next sentence prediction task too easy. Instead, we split the input into # segments "A" and "B" based on the actual "sentences" provided by the user # input. instances=[]<line_sep>current_chunk=[]<line_sep>current_length=0<line_sep>i=0<while_stmt>i<l>len(document)# pylint: disable=R1702 <block_start>segment=document[i]<line_sep>current_chunk.append(segment)<line_sep>current_length<augadd>len(segment)<if_stmt>i<eq>len(document)-1<or>current_length<ge>target_seq_length<block_start><if_stmt>current_chunk# `a_end` is how many segments from `current_chunk` go into the `A` # (first) sentence. 
<block_start>a_end=1<if_stmt>len(current_chunk)<ge>2<block_start>a_end=random.randint(1 len(current_chunk)-1)<block_end>tokens_a=[]<for_stmt>j range(a_end)<block_start>tokens_a.extend(current_chunk[j])<block_end>tokens_b=[]<line_sep># Random next is_random_next=<false><if_stmt>len(current_chunk)<eq>1<or>random.random()<l>0.5<block_start>is_random_next=<true><line_sep>target_b_length=target_seq_length-len(tokens_a)<line_sep># randomly choose a document other than itself random_document_index=random.randint(0 len(all_documents)-2)<if_stmt>random_document_index<ge>document_index<block_start>random_document_index<augadd>1<block_end>random_document=all_documents[random_document_index]<line_sep>random_start=random.randint(0 len(random_document)-1)<for_stmt>j range(random_start len(random_document))<block_start>tokens_b.extend(random_document[j])<if_stmt>len(tokens_b)<ge>target_b_length<block_start><break><block_end><block_end># We didn't actually use these segments so we 'put them back' so # they don't go to waste. num_unused_segments=len(current_chunk)-a_end<line_sep>i<augsub>num_unused_segments<block_end># Actual next <else_stmt><block_start>is_random_next=<false><for_stmt>j range(a_end len(current_chunk))<block_start>tokens_b.extend(current_chunk[j])<block_end><block_end>truncate_seq_pair(tokens_a tokens_b max_num_tokens)<assert_stmt>len(tokens_a)<ge>1<assert_stmt>len(tokens_b)<ge>1<line_sep>tokens=[]<line_sep>segment_ids=[]<line_sep>tokens.append(_CLS_TOKEN)<line_sep>segment_ids.append(0)<for_stmt>token tokens_a<block_start>tokens.append(token)<line_sep>segment_ids.append(0)<block_end>tokens.append(_SEP_TOKEN)<line_sep>segment_ids.append(0)<for_stmt>token tokens_b<block_start>tokens.append(token)<line_sep>segment_ids.append(1)<block_end>tokens.append(_SEP_TOKEN)<line_sep>segment_ids.append(1)<line_sep>(tokens masked_lm_positions masked_lm_labels)=create_masked_lm_predictions(tokens masked_lm_prob max_predictions_per_seq whole_word_mask vocab tokenizer _MASK_TOKEN _CLS_TOKEN _SEP_TOKEN)<line_sep>instance=TrainingInstance(tokens=tokens segment_ids=segment_ids is_random_next=is_random_next masked_lm_positions=masked_lm_positions masked_lm_labels=masked_lm_labels vocab=vocab)<line_sep>instances.append(instance)<block_end>current_chunk=[]<line_sep>current_length=0<block_end>i<augadd>1<block_end><return>instances<block_end>MaskedLmInstance=collections.namedtuple('MaskedLmInstance' ['index' 'label'])<def_stmt>create_masked_lm_predictions tokens masked_lm_prob max_predictions_per_seq whole_word_mask vocab tokenizer _MASK_TOKEN _CLS_TOKEN _SEP_TOKEN<block_start>"""Creates the predictions for the masked LM objective."""<line_sep>cand_indexes=[]<for_stmt>(i token) enumerate(tokens)<block_start><if_stmt>token<in>[_CLS_TOKEN _SEP_TOKEN]<block_start><continue><block_end># Whole Word Masking means that if we mask all of the subwords # corresponding to an original word. When a word has been split into # subwords, the first token does not have any marker and any subsequence # tokens are prefixed with ##. So whenever we see the ## token, we # append it to the previous set of word indexes. # # Note that Whole Word Masking does *not* change the training code # at all -- we still predict each subword independently, softmaxed # over the entire vocabulary. 
<if_stmt>whole_word_mask<and>len(cand_indexes)<ge>1<and><not>tokenizer.is_first_subword(vocab.idx_to_token[token])<block_start>cand_indexes[-1].append(i)<block_end><else_stmt><block_start>cand_indexes.append([i])<block_end><block_end>random.shuffle(cand_indexes)<line_sep>output_tokens=list(tokens)<line_sep>num_to_predict=min(max_predictions_per_seq max(1 int(round(len(tokens)<times>masked_lm_prob))))<line_sep>masked_lms=[]<line_sep>covered_indexes=set()<for_stmt>index_set cand_indexes<block_start><if_stmt>len(masked_lms)<ge>num_to_predict<block_start><break><block_end># If adding a whole-word mask would exceed the maximum number of # predictions, then just skip this candidate. <if_stmt>len(masked_lms)+len(index_set)<g>num_to_predict<block_start><continue><block_end>is_any_index_covered=<false><for_stmt>index index_set<block_start><if_stmt>index<in>covered_indexes<block_start>is_any_index_covered=<true><line_sep><break><block_end><block_end><if_stmt>is_any_index_covered<block_start><continue><block_end><for_stmt>index index_set<block_start>covered_indexes.add(index)<line_sep>masked_token=<none><line_sep># 80% of the time, replace with [MASK] <if_stmt>random.random()<l>0.8<block_start>masked_token=_MASK_TOKEN<block_end><else_stmt># 10% of the time, keep original <block_start><if_stmt>random.random()<l>0.5<block_start>masked_token=tokens[index]<block_end># 10% of the time, replace with random word <else_stmt># generate a random word in [0, vocab_size - 1] <block_start>masked_token=random.randint(0 len(vocab)-1)<block_end><block_end>output_tokens[index]=masked_token<line_sep>masked_lms.append(MaskedLmInstance(index=index label=tokens[index]))<block_end><block_end><assert_stmt>len(masked_lms)<le>num_to_predict<line_sep>masked_lms=sorted(masked_lms key=<lambda>x:x.index)<line_sep>masked_lm_positions=[]<line_sep>masked_lm_labels=[]<for_stmt>p masked_lms<block_start>masked_lm_positions.append(p.index)<line_sep>masked_lm_labels.append(p.label)<block_end><return>(output_tokens masked_lm_positions masked_lm_labels)<block_end><def_stmt>truncate_seq_pair tokens_a tokens_b max_num_tokens<block_start>"""Truncates a pair of sequences to a maximum sequence length."""<while_stmt><true><block_start>total_length=len(tokens_a)+len(tokens_b)<if_stmt>total_length<le>max_num_tokens<block_start><break><block_end>trunc_tokens=tokens_a<if>len(tokens_a)<g>len(tokens_b)<else>tokens_b<assert_stmt>len(trunc_tokens)<ge>1<line_sep># We want to sometimes truncate from the front and sometimes from the # back to add more randomness and avoid biases. 
<if_stmt>random.random()<l>0.5<block_start><del_stmt>trunc_tokens[0]<block_end><else_stmt><block_start>trunc_tokens.pop()<block_end><block_end><block_end><def_stmt>main <block_start>"""Main function."""<line_sep>time_start=time.time()<line_sep># random seed random.seed(args.random_seed)<line_sep># create output dir output_dir=os.path.expanduser(args.output_dir)<line_sep>nlp.utils.mkdir(output_dir)<line_sep>vocab=nlp.data.utils._load_pretrained_vocab(args.dataset_name root=output_dir cls=nlp.vocab.BERTVocab)<line_sep>tokenizer=nlp.data.GPT2BPETokenizer()<line_sep># count the number of input files input_files=[]<for_stmt>input_pattern args.input_file.split(',')<block_start>input_files.extend(glob.glob(os.path.expanduser(input_pattern)))<block_end><for_stmt>input_file input_files<block_start>logging.info('\t%s' input_file)<block_end>num_inputs=len(input_files)<line_sep>num_outputs=min(args.num_outputs len(input_files))<line_sep>logging.info('*** Reading from %d input files ***' num_inputs)<line_sep># calculate the number of splits file_splits=[]<line_sep>split_size=(num_inputs+num_outputs-1)<floordiv>num_outputs<for_stmt>i range(num_outputs)<block_start>split_start=i<times>split_size<line_sep>split_end=min(num_inputs (i+1)<times>split_size)<line_sep>file_splits.append(input_files[split_start:split_end])<block_end># prepare workload count=0<line_sep>process_args=[]<for_stmt>i,file_split enumerate(file_splits)<block_start>output_file=os.path.join(output_dir 'part-{}.npz'.format(str(i).zfill(3)))<line_sep>count<augadd>len(file_split)<line_sep>process_args.append((file_split tokenizer args.max_seq_length args.short_seq_prob args.masked_lm_prob args.max_predictions_per_seq args.whole_word_mask vocab args.dupe_factor 1 <none> output_file))<block_end># sanity check <assert_stmt>count<eq>len(input_files)<line_sep># dispatch to workers nworker=args.num_workers<if_stmt>nworker<g>1<block_start>pool=Pool(nworker)<line_sep>pool.map(create_training_instances process_args)<block_end><else_stmt><block_start><for_stmt>process_arg process_args<block_start>create_training_instances(process_arg)<block_end><block_end>time_end=time.time()<line_sep>logging.info('Time cost=%.1f' time_end-time_start)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='Pre-training data generator for Bort' formatter_class=argparse.ArgumentDefaultsHelpFormatter)<line_sep>parser.add_argument('--input_file' type=str required=<true> help='Input files, separated by comma. For example, "~/data/*.txt"')<line_sep>parser.add_argument('--output_dir' type=str required=<true> help='Output directory.')<line_sep>parser.add_argument('--dataset_name' type=str default='openwebtext_ccnews_stories_books_cased' choices=['book_corpus_wiki_en_uncased' 'book_corpus_wiki_en_cased' 'wiki_multilingual_uncased' 'wiki_multilingual_cased' 'wiki_cn_cased' 'openwebtext_ccnews_stories_books_cased'] help='The dataset name for the vocab file Bort model was trained on')<line_sep>parser.add_argument('--whole_word_mask' action='store_true' help='Whether to use whole word masking rather than per-subword masking.')<line_sep>parser.add_argument('--max_seq_length' type=int default=512 help='Maximum sequence length.')<line_sep>parser.add_argument('--max_predictions_per_seq' type=int default=80 help='Maximum number of masked LM predictions per sequence. 
')<line_sep>parser.add_argument('--random_seed' type=int default=12345 help='Random seed for data generation.')<line_sep>parser.add_argument('--dupe_factor' type=int default=1 help='Number of times to duplicate the input data (with different masks).')<line_sep>parser.add_argument('--masked_lm_prob' type=float default=0.15 help='Masked LM probability.')<line_sep>parser.add_argument('--short_seq_prob' type=float default=0.1 help='Probability of creating sequences which are shorter than the '<concat>'maximum length. ')<line_sep>parser.add_argument('--verbose' action='store_true' help='Print debug information')<line_sep>parser.add_argument('--num_workers' type=int default=8 help='Number of workers for parallel processing, where each generates an output file.')<line_sep>parser.add_argument('--num_outputs' type=int default=1 help='Number of desired output files, where each is processed independently by a worker.')<line_sep>args=parser.parse_args()<line_sep>logging.getLogger().setLevel(logging.DEBUG<if>args.verbose<else>logging.INFO)<line_sep>logging.info(args)<line_sep>main()<block_end>
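The 80/10/10 replacement rule used by `create_masked_lm_predictions` above, distilled into a standalone sketch; the token ids, mask id, and vocabulary size here are made up for illustration.

import random

def mask_token(token_id, mask_id=103, vocab_size=30000):
    # 80% of the time: replace with [MASK]; 10%: keep the original; 10%: random vocab id.
    r = random.random()
    if r < 0.8:
        return mask_id
    if r < 0.9:
        return token_id
    return random.randint(0, vocab_size - 1)

random.seed(0)
print([mask_token(t) for t in [5, 17, 256, 42]])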
<import_stmt>sys<import_stmt>os<import_stmt>glob<import_stmt>re<import_stmt>gzip<import_stmt>array<import_stmt>loompy<import_stmt>numpy<as>np<import_stmt>random<import_stmt>string<import_stmt>subprocess<import_stmt>multiprocessing<import_stmt>csv<import_stmt>itertools<import_from_stmt>collections defaultdict<import_stmt>logging<import_stmt>h5py<import_from_stmt>typing *<import_stmt>velocyto<as>vcy<def_stmt>id_generator size:int=6 chars:str=string.ascii_uppercase+string.digits<arrow>str<block_start><return>''.join(random.choice(chars)<for>_ range(size))<block_end><def_stmt>_run * bamfile:Tuple[str] gtffile:str bcfile:str outputfolder:str sampleid:str metadatatable:str repmask:str onefilepercell:bool logic:str without_umi:str umi_extension:str multimap:bool test:bool samtools_threads:int samtools_memory:int loom_numeric_dtype:str dump:bool verbose:int additional_ca:dict={}<arrow><none><block_start>"""Runs the velocity analysis outputting a loom file BAMFILE or [BAMFILES] one or several bam files with position-sorted reads GTFFILE annotation file NOTE: it is a keyword-only argument function """<line_sep>######################## # Resolve Inputs # ######################## logging.basicConfig(stream=sys.stdout format='%(asctime)s - %(levelname)s - %(message)s' level=[logging.ERROR logging.WARNING logging.INFO logging.DEBUG][verbose])<if_stmt>isinstance(bamfile tuple)<and>len(bamfile)<g>1<and>bamfile[-1][-4:]<in>[".bam" ".sam"]<block_start>multi=<true><block_end><elif_stmt>isinstance(bamfile tuple)<and>len(bamfile)<eq>1<block_start>multi=<false><block_end><else_stmt><block_start><raise>IOError(f"Something went wrong in the argument parsing. You passed as bamfile: {bamfile}")<block_end><if_stmt>onefilepercell<and>multi<block_start><if_stmt>bcfile<is><not><none><block_start><raise>ValueError("Inputs incompatibility. --bcfile/-b option was used together with --onefilepercell/-c option.")<block_end>logging.warning("Each bam file will be interpreted as a DIFFERENT cell")<block_end><elif_stmt><not>onefilepercell<and>multi<block_start>logging.warning("Several input files but --onefilepercell is False. 
Each bam file will be interpreted as containing a SET of cells!!!")<block_end><if_stmt>sampleid<is><none><block_start><assert_stmt>metadatatable<is><none> "--metadatatable was specified but cannot fetch sample metadata without valid sampleid"<if_stmt>multi<block_start>logging.warning(f"When using multiple files you may want to use the --sampleid option to specify the name of the output file")<block_end><if_stmt>multi<and><not>onefilepercell<block_start>full_name="_".join([os.path.basename(bamfile[i]).split(".")[0]<for>i range(len(bamfile))])<if_stmt>len(full_name)<g>50<block_start>sampleid=f'multi_input_{os.path.basename(bamfile[0]).split(".")[0]}_{id_generator(5)}'<block_end><else_stmt><block_start>sampleid=f'multi_input_{full_name}_and_others_{id_generator(5)}'<block_end><block_end><elif_stmt>multi<and>onefilepercell<block_start>sampleid=f'onefilepercell_{os.path.basename(bamfile[0]).split(".")[0]}_and_others_{id_generator(5)}'<block_end><else_stmt><block_start>sampleid=f'{os.path.basename(bamfile[0]).split(".")[0]}_{id_generator(5)}'<block_end>logging.info(f"No SAMPLEID specified, the sample will be called {sampleid} (last 5 digits are a random-id to avoid overwriting some other file by mistake)")<block_end># Create an output folder inside the cell ranger output folder <if_stmt>outputfolder<is><none><block_start>outputfolder=os.path.join(os.path.split(bamfile[0])[0] "velocyto")<line_sep>logging.info(f"No OUTPUTFOLDER specified, the output files will be inside {outputfolder}")<block_end><if_stmt><not>os.path.exists(outputfolder)<block_start>os.mkdir(outputfolder)<block_end>logic_class=getattr(vcy logic)<if_stmt><not>issubclass(logic_class vcy.Logic)<block_start><raise>ValueError(f"{logic} is not a valid logic. Choose one among {', '.join([k<for>k,v vcy.logic.__dict__.items()<if>issubclass(v vcy.Logic)])}")<block_end><else_stmt><block_start>logging.debug(f"Using logic: {logic}")<line_sep>logic_obj=logic_class()<block_end><if_stmt>bcfile<is><none><block_start>logging.debug("Cell barcodes will be determined while reading the .bam file")<line_sep>valid_bcset=<none><block_end><else_stmt># Get valid cell barcodes <block_start>valid_bcs_list=(gzip.open(bcfile).read().decode()<if>bcfile.endswith(".gz")<else>open(bcfile).read()).rstrip().split()<line_sep>valid_cellid_list=np.array([f"{sampleid}:{v_bc}"<for>v_bc valid_bcs_list])# with sample id and with -1 <if_stmt>len(set(bc.split('-')[0]<for>bc valid_bcs_list))<eq>1<block_start>gem_grp=f"-{valid_bcs_list[0].split('-')[-1]}"<block_end><else_stmt><block_start>gem_grp="x"<block_end>valid_bcset=set(bc.split('-')[0]<for>bc valid_bcs_list)# without -1 logging.info(f"Read {len(valid_bcs_list)} cell barcodes from {bcfile}")<line_sep>logging.debug(f"Example of barcode: {valid_bcs_list[0].split('-')[0]} and cell_id: {valid_cellid_list[0]}")<block_end># Get metadata from sample sheet <if_stmt>metadatatable<block_start><try_stmt><block_start>sample_metadata=vcy.MetadataCollection(metadatatable)<line_sep>sample=sample_metadata.where("SampleID" sampleid)<if_stmt>len(sample)<eq>0<block_start>logging.error(f"Sample ID {sampleid} not found in sample sheet")<line_sep># schema = [] # type: List sample={}<block_end><elif_stmt>len(sample)<g>1<block_start>logging.error(f"Sample ID {sampleid} has multiple lines in sample sheet")<line_sep>sys.exit(1)<block_end><else_stmt># schema = sample[0].types <block_start>sample=sample[0].dict<block_end>logging.debug(f"Collecting column attributes from {metadatatable}")<block_end><except_stmt>(NameError 
TypeError)<as>e<block_start>logging.warning("SAMPLEFILE was not specified. Add -s SAMPLEFILE to add metadata.")<line_sep>sample={}<block_end><block_end><else_stmt><block_start>sample={}<block_end>######################## # Start Analysis # ######################## # Initialize Exon-Intron Counter with the logic and valid barcodes (need to do it now to peek) <if_stmt>without_umi<block_start><if_stmt>umi_extension<ne>"no"<block_start>logging.warning("--umi-extension was specified but incompatible with --without-umi, it will be ignored!")<block_end>umi_extension="without_umi"<block_end>exincounter=vcy.ExInCounter(sampleid=sampleid logic=logic_class valid_bcset=valid_bcset umi_extension=umi_extension onefilepercell=onefilepercell dump_option=dump outputfolder=outputfolder)<line_sep># Heuristic to choose the memory/cpu effort <try_stmt><block_start>mb_available=int(subprocess.check_output('grep MemAvailable /proc/meminfo'.split()).split()[1])/1000<block_end><except_stmt>subprocess.CalledProcessError<block_start>logging.warning("Your system does not support calling `grep MemAvailable /proc/meminfo` so the memory effort for the samtools command could not be chosen appropriately. 32Gb will be assumed")<line_sep>mb_available=32000# 32Gb <block_end>threads_to_use=min(samtools_threads multiprocessing.cpu_count())<line_sep>mb_to_use=int(min(samtools_memory mb_available/(len(bamfile)<times>threads_to_use)))<line_sep>compression=vcy.BAM_COMPRESSION<line_sep># I need to peek into the bam file to know which cell barcode flag should be used <if_stmt>onefilepercell<and>without_umi<block_start>tagname="NOTAG"<block_end><elif_stmt>onefilepercell<block_start>logging.debug("The multi input option ")<line_sep>tagname="NOTAG"<line_sep>exincounter.peek_umi_only(bamfile[0])<block_end><else_stmt><block_start>exincounter.peek(bamfile[0])<line_sep>tagname=exincounter.cellbarcode_str<block_end><if_stmt>multi<and>onefilepercell<block_start>bamfile_cellsorted=list(bamfile)<block_end><elif_stmt>onefilepercell<block_start>bamfile_cellsorted=[bamfile[0]]<block_end><else_stmt><block_start>bamfile_cellsorted=[f"{os.path.join(os.path.dirname(bmf) 'cellsorted_'+os.path.basename(bmf))}"<for>bmf bamfile]<block_end>sorting_process:Dict[int Any]={}<for_stmt>ni,bmf_cellsorted enumerate(bamfile_cellsorted)# Start a subprocess that sorts the bam file <block_start>command=f"samtools sort -l {compression} -m {mb_to_use}M -t {tagname} -O BAM -@ {threads_to_use} -o {bmf_cellsorted} {bamfile[ni]}"<if_stmt>os.path.exists(bmf_cellsorted)# This should skip sorting in smartseq2 <block_start>logging.warning(f"The file {bmf_cellsorted} already exists. 
The sorting step will be skipped and the existing file will be used.")<line_sep>check_end_process=<false><block_end><else_stmt><block_start>sorting_process[ni]=subprocess.Popen(command.split() stdout=subprocess.PIPE)<line_sep>logging.info(f"Starting the sorting process of {bamfile[ni]} the output will be at: {bmf_cellsorted}")<line_sep>logging.info(f"Command being run is: {command}")<line_sep>logging.info(f"While the bam sorting happens do other things...")<line_sep>check_end_process=<true><block_end><block_end># Load annotations logging.info(f"Load the annotation from {gtffile}")<line_sep>annotations_by_chrm_strand=exincounter.read_transcriptmodels(gtffile)<line_sep>chrs=list(v<for>k,v annotations_by_chrm_strand.items())<line_sep>tms=list(itertools.chain.from_iterable((v.values()<for>v chrs)))<line_sep>ivls=list(itertools.chain.from_iterable(tms))<line_sep>logging.debug(f"Generated {len(ivls)} features corresponding to {len(tms)} transcript models from {gtffile}")<del_stmt>chrs tms ivls<line_sep># Load the repeat mask annotations <if_stmt>repmask<is><not><none><block_start>logging.info(f"Load the repeat masking annotation from {repmask}")<line_sep>mask_ivls_by_chromstrand=exincounter.read_repeats(repmask)<block_end># Go through the bam files a first time to markup introns logging.info(f"Scan {' '.join(bamfile)} to validate intron intervals")<if_stmt>test# NOTE: Remove this after finishing testing, the only purpose was to save 15min in the debugging process <block_start>logging.warning("This place is for developer only!")<import_stmt>pickle<if_stmt>os.path.exists("exincounter_dump.pickle")<block_start>logging.debug("exincounter_dump.pickle is being loaded")<line_sep>exincounter=pickle.load(open("exincounter_dump.pickle" "rb"))<block_end><else_stmt><block_start>logging.debug("exincounter_dump.pickle was not found")<line_sep>logging.debug("Dumping exincounter_dump.pickle BEFORE markup")<line_sep>pickle.dump(exincounter open("exincounter_dump.pickle" "wb"))<line_sep>exincounter.mark_up_introns(bamfile=bamfile multimap=multimap)<block_end><block_end><else_stmt><block_start>exincounter.mark_up_introns(bamfile=bamfile multimap=multimap)<block_end># Wait for child process to terminate <if_stmt>check_end_process<block_start>logging.info(f"Now just waiting for the bam sorting process to terminate")<for_stmt>k sorting_process.keys()<block_start>returncode=sorting_process[k].wait()<if_stmt>returncode<eq>0<block_start>logging.info(f"bam file #{k} has been sorted")<block_end><else_stmt><block_start><raise>MemoryError(f"bam file #{k} could not be sorted by cells.\n\ This is probably related to an old version of samtools, please install samtools >= 1.6.\ Alternatively this could be a memory error, try to set the --samtools_memory option to a value compatible with your system. 
\ Otherwise sort manually by samtools ``sort -l [compression] -m [mb_to_use]M -t [tagname] -O BAM -@ [threads_to_use] -o cellsorted_[bamfile] [bamfile]``")<block_end><block_end><block_end># Do the actual counting logging.debug("Start molecule counting!")<line_sep>results=exincounter.count(bamfile_cellsorted multimap=multimap)# NOTE: we would avoid millions of if-statement evaluations if we wrote two functions, count and count_with_output dict_list_arrays,cell_bcs_order=results<line_sep>######################## # Output # ######################## # Prepare the loom file output <if_stmt><not>exincounter.filter_mode<block_start>valid_bcset=exincounter.valid_bcset# without -1 valid_bcs_list=list(valid_bcset)# without -1 gem_grp=""<line_sep>valid_cellid_list=np.array([f"{sampleid}:{v_bc}"<for>v_bc valid_bcs_list])# with sampleid and with -1 logging.debug(f"Example of barcode: {valid_bcs_list[0]} and cell_id: {valid_cellid_list[0]}")<block_end>ca={"CellID":np.array([f"{sampleid}:{v_bc}{gem_grp}"<for>v_bc cell_bcs_order])}<line_sep>ca.update(additional_ca)<for_stmt>key,value sample.items()<block_start>ca[key]=np.full(len(cell_bcs_order) value)<block_end># Save to loom file outfile=os.path.join(outputfolder f"{sampleid}.loom")<line_sep>logging.debug(f"Generating output file {outfile}")<line_sep># row attributes atr_table=(("Gene" "genename" str) ("Accession" "geneid" str) ("Chromosome" "chrom" str) ("Strand" "strand" str) ("Start" "start" int) ("End" "end" int))<line_sep>logging.debug("Collecting row attributes")<line_sep>ra={}<for_stmt>name_col_attr,name_obj_attr,dtyp atr_table<block_start>tmp_array=np.zeros((len(exincounter.genes) ) dtype=object)# type: np.ndarray <for_stmt>gene_id,gene_info exincounter.genes.items()<block_start>tmp_array[exincounter.geneid2ix[gene_id]]=getattr(gene_info name_obj_attr)<block_end>ra[name_col_attr]=tmp_array.astype(dtyp)<block_end>logging.debug("Generating data table")<line_sep>layers:Dict[str np.ndarray]={}<for_stmt>layer_name logic_obj.layers<block_start>layers[layer_name]=np.concatenate(dict_list_arrays[layer_name] axis=1)<del_stmt>dict_list_arrays[layer_name]<block_end><for_stmt>layer_name logic_obj.layers<block_start>total:np.ndarray# This is just a type annotation to avoid mypy complaints <try_stmt><block_start>total<augadd>layers[layer_name]<block_end><except_stmt>NameError<block_start>total=np.array(layers[layer_name])<block_end><block_end>logging.debug("Writing loom file")<try_stmt><block_start>ds=loompy.create(filename=outfile matrix=total row_attrs=ra col_attrs=ca dtype="float32")<for_stmt>layer_name logic_obj.layers<block_start>ds.set_layer(name=layer_name matrix=layers[layer_name] dtype=loom_numeric_dtype)<block_end>ds.attrs["velocyto.__version__"]=vcy.__version__<line_sep>ds.attrs["velocyto.logic"]=logic<line_sep>ds.close()<block_end><except_stmt>TypeError# If user is using loompy2 # NOTE maybe this is not super efficient if the type and order are already correct <block_start>tmp_layers={"":total.astype("float32" order="C" copy=<false>)}<line_sep>tmp_layers.update({layer_name:layers[layer_name].astype(loom_numeric_dtype order="C" copy=<false>)<for>layer_name logic_obj.layers})<line_sep>loompy.create(filename=outfile layers=tmp_layers row_attrs=ra col_attrs=ca file_attrs={"velocyto.__version__":vcy.__version__ "velocyto.logic":logic})<block_end>logging.debug("Terminated Successfully!")<block_end>
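A worked sketch of the samtools memory/thread heuristic used in `_run` above; the CLI values and available memory are assumed for illustration, not velocyto's real defaults.

import multiprocessing

samtools_threads, samtools_memory = 16, 4000   # hypothetical CLI settings (MB)
mb_available = 64000                           # e.g. 64 GB reported free by /proc/meminfo
n_bamfiles = 2

threads_to_use = min(samtools_threads, multiprocessing.cpu_count())
# Per-thread sort buffer: never exceed the requested memory or an even share of free memory.
mb_to_use = int(min(samtools_memory, mb_available / (n_bamfiles * threads_to_use)))
print(f"samtools sort -m {mb_to_use}M -@ {threads_to_use} ...")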
"""Example code for the nodes in the example pipeline. This code is meant just for illustrating basic Kedro features. Delete this when you start working on your own Kedro project. """<line_sep># pylint: disable=invalid-name <import_stmt>logging<import_from_stmt>typing Any Dict<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<def_stmt>train_model train_x:pd.DataFrame train_y:pd.DataFrame parameters:Dict[str Any]<arrow>np.ndarray<block_start>"""Node for training a simple multi-class logistic regression model. The number of training iterations as well as the learning rate are taken from conf/project/parameters.yml. All of the data as well as the parameters will be provided to this function at the time of execution. """<line_sep>num_iter=parameters["example_num_train_iter"]<line_sep>lr=parameters["example_learning_rate"]<line_sep>X=train_x.to_numpy()<line_sep>Y=train_y.to_numpy()<line_sep># Add bias to the features bias=np.ones((X.shape[0] 1))<line_sep>X=np.concatenate((bias X) axis=1)<line_sep>weights=[]<line_sep># Train one model for each class in Y <for_stmt>k range(Y.shape[1])# Initialise weights <block_start>theta=np.zeros(X.shape[1])<line_sep>y=Y[: k]<for_stmt>_ range(num_iter)<block_start>z=np.dot(X theta)<line_sep>h=_sigmoid(z)<line_sep>gradient=np.dot(X.T (h-y))/y.size<line_sep>theta<augsub>lr<times>gradient<block_end># Save the weights for each model weights.append(theta)<block_end># Return a joint multi-class model with weights for all classes <return>np.vstack(weights).transpose()<block_end><def_stmt>predict model:np.ndarray test_x:pd.DataFrame<arrow>np.ndarray<block_start>"""Node for making predictions given a pre-trained model and a test set."""<line_sep>X=test_x.to_numpy()<line_sep># Add bias to the features bias=np.ones((X.shape[0] 1))<line_sep>X=np.concatenate((bias X) axis=1)<line_sep># Predict "probabilities" for each class result=_sigmoid(np.dot(X model))<line_sep># Return the index of the class with max probability for all samples <return>np.argmax(result axis=1)<block_end><def_stmt>report_accuracy predictions:np.ndarray test_y:pd.DataFrame<arrow><none><block_start>"""Node for reporting the accuracy of the predictions performed by the previous node. Notice that this function has no outputs, except logging. """<line_sep># Get true class index target=np.argmax(test_y.to_numpy() axis=1)<line_sep># Calculate accuracy of predictions accuracy=np.sum(predictions<eq>target)/target.shape[0]<line_sep># Log the accuracy of the model log=logging.getLogger(__name__)<line_sep>log.info("Model accuracy on test set: %0.2f%%" accuracy<times>100)<block_end><def_stmt>_sigmoid z<block_start>"""A helper sigmoid function used by the training and the scoring nodes."""<line_sep><return>1/(1+np.exp(-z))<block_end>
<import_from_future_stmt> print_function<import_stmt>constants rauth subprocess os json base64 urlparse<line_sep>""" Auth module for OneNote. Our strategy is to store only the refresh token and use it every time to get a new access token. """<def_stmt>get_oauth_service <block_start>client_id,client_secret=json.loads(base64.b64decode(constants.CLIENT_DATA))<line_sep><return>rauth.OAuth2Service(client_id=client_id client_secret=client_secret access_token_url=constants.ENDPOINT_ACCESS authorize_url=constants.ENDPOINT_AUTHORIZE base_url=constants.ENDPOINT_BASE)<block_end><def_stmt>get_session stored_refresh_token<block_start>""" called in main extension script to actually get a usable session """<line_sep>service=get_oauth_service()<line_sep>r=service.get_raw_access_token(data={'refresh_token':stored_refresh_token 'grant_type':'refresh_token'})<line_sep><return>service.get_session(r.json()['access_token'])<block_end><def_stmt>access authorization_code<block_start>""" obtain the refresh token """<line_sep><return>get_oauth_service().get_raw_access_token(data={'code':authorization_code 'grant_type':'authorization_code' 'redirect_uri':constants.CALLBACK}).json()['refresh_token']<block_end><def_stmt>authorize <block_start>""" send user to the oauth authorization url in their browser """<line_sep>subprocess.call(['open' get_oauth_service().get_authorize_url(**constants.AUTHORIZE_DATA)])<block_end><def_stmt>main callback_final=<none><block_start>""" this is called once (with no params) when the user clicks 'log in', and again (with params) when they click through the callback landing url """<if_stmt>(callback_final)<block_start>print(json.dumps(access(urlparse.parse_qs(callback_final)['code'])) end='')<block_end><else_stmt><block_start>print(json.dumps(authorize()) end='')<line_sep>exit(4)<block_end><block_end># indicates to PopClip that a callback will follow <if_stmt>__name__<eq>'__main__'<block_start>main(os.getenv('POPCLIP_AUTH_CALLBACK_FINAL'))<block_end>
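A hypothetical round trip with the helpers above; the refresh token is a placeholder and the request path is an assumption about the OneNote API, not something the module defines.

# Sketch only: `stored` would come from PopClip's saved settings after authorize()/access().
stored = "REFRESH_TOKEN_PLACEHOLDER"
session = get_session(stored)
resp = session.get("me/notes/notebooks")  # path assumed; resolved against constants.ENDPOINT_BASE
print(resp.status_code)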
<import_from_stmt>django.contrib.auth get_user_model<import_from_stmt>django.test SimpleTestCase TestCase<import_from_stmt>django.urls reverse<class_stmt>HomePageTests(SimpleTestCase)<block_start><def_stmt>test_home_page_status_code self<block_start>response=self.client.get('/')<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_view_url_by_name self<block_start>response=self.client.get(reverse('home'))<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_view_uses_correct_template self<block_start>response=self.client.get(reverse('home'))<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertTemplateUsed(response 'home.html')<block_end><block_end><class_stmt>SignupPageTests(TestCase)<block_start>username='newuser'<line_sep>email='<EMAIL>'<def_stmt>test_signup_page_status_code self<block_start>response=self.client.get('/accounts/signup/')<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_view_url_by_name self<block_start>response=self.client.get(reverse('signup'))<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_view_uses_correct_template self<block_start>response=self.client.get(reverse('signup'))<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertTemplateUsed(response 'registration/signup.html')<block_end><def_stmt>test_signup_form self<block_start>new_user=get_user_model().objects.create_user(self.username self.email)<line_sep>self.assertEqual(get_user_model().objects.all().count() 1)<line_sep>self.assertEqual(get_user_model().objects.all()[0].username self.username)<line_sep>self.assertEqual(get_user_model().objects.all()[0].email self.email)<block_end><block_end>
# Copyright (c) OpenMMLab. All rights reserved. <import_stmt>argparse<import_stmt>os.path<as>osp<import_stmt>cv2<import_stmt>mmcv<import_stmt>numpy<as>np<try_stmt><block_start><import_stmt>imageio<block_end><except_stmt>ImportError<block_start>imageio=<none><block_end><def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description='Merge images and visualized flow')<line_sep>parser.add_argument('--img_dir' type=str default=<none> help='directory of images')<line_sep>parser.add_argument('--flow_dir' type=str default=<none> help='directory of visualized flow')<line_sep>parser.add_argument('--resize_factor' type=float default=0.5 help='resize factor for gif')<line_sep>parser.add_argument('--out_dir' type=str default=<none> help='directory to save merged results')<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><def_stmt>merge_imgs_flow img_dir:str flow_dir:str out_dir:str<arrow><none><block_start>"""Load images and visualized flow maps and merge them. Args: img_dir (str): The directory of images. flow_dir (str): The directory of flow maps. out_dir (str): The directory to save the merged frames. """<line_sep>img_files=list(mmcv.scandir(img_dir))<line_sep>flow_files=list(mmcv.scandir(flow_dir))<line_sep>img_files.sort()<line_sep>flow_files.sort()<line_sep># there is one more image than flow maps (flow is defined between consecutive frames) <for_stmt>i range(len(img_files)-1)<block_start>img=mmcv.imread(osp.join(img_dir img_files[i]))<line_sep>flow=mmcv.imread(osp.join(flow_dir flow_files[i]))<line_sep>frame=np.concatenate((img flow) axis=1)<line_sep>cv2.imwrite(osp.join(out_dir flow_files[i]) frame)<block_end><block_end><def_stmt>main <block_start>args=parse_args()<line_sep>merge_imgs_flow(args.img_dir args.flow_dir args.out_dir)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
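A direct-call sketch of the helper above, bypassing argparse; the directory names are placeholders and must contain name-sorted frame and flow images.

import os

os.makedirs("merged", exist_ok=True)             # hypothetical output directory
merge_imgs_flow(img_dir="demo/frames",           # placeholder input directories
                flow_dir="demo/flow_vis",
                out_dir="merged")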
# -*- coding: utf-8 -*- <import_stmt>pytest<line_sep>@pytest.mark.online<class_stmt>TestNpoWatchlistInfo<block_start>config=""" tasks: test: npo_watchlist: email: '<EMAIL>' password: '<PASSWORD>!' """<def_stmt>test_npowatchlist_lookup self execute_task<block_start>"""npo_watchlist: Test npo watchlist lookup (ONLINE)"""<line_sep>task=execute_task('test')<line_sep>entry=task.find_entry(url='https://www.npostart.nl/zondag-met-lubach/09-11-2014/VPWON_1220631')<line_sep># s01e01 <assert_stmt>entry['npo_id']<eq>'VPWON_1220631'<assert_stmt>entry['npo_url']<eq>'https://www.npostart.nl/zondag-met-lubach/VPWON_1250334'<assert_stmt>entry['npo_name']<eq>'Zondag met Lubach'<assert_stmt>(entry['npo_description']<eq>'Zeven dagen nieuws in dertig minuten, satirisch geremixt door Arjen Lubach. Nog actueler, nog satirischer en nog vaker nog het woord nog.')<assert_stmt>entry['npo_runtime']<eq>'32'<assert_stmt>entry['npo_premium']<is><false><assert_stmt>(entry['npo_version']<eq>'NPO.release-1.58.0')<line_sep># specify for which version of NPO website we did run this unittest entry=(task.find_entry(url='https://www.npostart.nl/14-01-2014/VARA_101348553')<is><none>)<line_sep># episode with weird (and broken) URL and should be skipped entry=task.find_entry(url='https://www.npostart.nl/zembla/12-12-2013/VARA_101320582')<line_sep># check that the next episode it there though <assert_stmt>entry['npo_id']<eq>'VARA_101320582'<assert_stmt>entry['npo_url']<eq>'https://www.npostart.nl/zembla/VARA_101377863'<assert_stmt>entry['npo_name']<eq>'ZEMBLA'<line_sep>entry=task.find_entry(url='https://www.npostart.nl/typisch-overvecht/24-05-2018/BV_101388144')<assert_stmt>entry['npo_id']<eq>'BV_101388144'<assert_stmt>entry['npo_url']<eq>'https://www.npostart.nl/typisch/BV_101386658'<assert_stmt>entry['npo_name']<eq>'Typisch'<line_sep>entry=task.find_entry(url='https://www.npostart.nl/zembla/14-10-2007/VARA_101153941')<line_sep># episode without a running time <assert_stmt>entry['npo_runtime']<eq>'0'<assert_stmt>(task.find_entry(url='https://www.npostart.nl/11-04-2014/KN_1656572')<is><none>)<line_sep># episode without a name (and broken URL) that should be skipped <assert_stmt>(task.find_entry(url='https://www.npostart.nl/zondag-met-lubach-westeros-the-series/04-09-2017/WO_VPRO_10651334')<is><none>)<block_end><block_end># a trailer for the series, that should not be listed @pytest.mark.online<class_stmt>TestNpoWatchlistPremium<block_start>config=""" tasks: test: npo_watchlist: email: '<EMAIL>' password: '<PASSWORD>!' download_premium: yes """<def_stmt>test_npowatchlist_lookup self execute_task<block_start>"""npo_watchlist: Test npo watchlist lookup (ONLINE)"""<line_sep>task=execute_task('test')<line_sep>entry=task.find_entry(url='https://www.npostart.nl/hollands-hoop/08-02-2020/BV_101396963')<line_sep># a premium serie <assert_stmt>entry['npo_id']<eq>'BV_101396963'<assert_stmt>entry['npo_url']<eq>'https://www.npostart.nl/hollands-hoop/BV_101385153'<assert_stmt>entry['npo_name']<eq>'Hollands Hoop'<assert_stmt>entry['npo_runtime']<eq>'53'<assert_stmt>entry['npo_premium']<is><true><block_end><block_end>@pytest.mark.online<class_stmt>TestNpoWatchlistLanguageTheTVDBLookup<block_start>config=""" tasks: test: npo_watchlist: email: '<PASSWORD> <EMAIL>' password: '<PASSWORD>!' 
thetvdb_lookup: yes """<def_stmt>test_tvdblang_lookup self execute_task<block_start>"""npo_watchlist: Test npo_watchlist tvdb language lookup (ONLINE)"""<line_sep>task=execute_task('test')<line_sep>entry=task.find_entry(url='https://www.npostart.nl/zondag-met-lubach/09-11-2014/VPWON_1220631')<line_sep># s01e01 <assert_stmt>entry['npo_language']<eq>'nl'<assert_stmt>entry['language']<eq>'nl'<assert_stmt>entry['tvdb_id']<eq>288799<assert_stmt>entry['tvdb_language']<eq>'nl'<block_end><block_end>
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fake groups data. TODO: consolidate with other fake group test data. """<line_sep>FAKE_GROUPS_DB_ROWS=[{'group_id':'1111aaaa1' 'member_role':'OWNER' 'member_type':'USER' 'member_email':'<EMAIL>'} {'group_id':'2222bbbb2' 'member_role':'MEMBER' 'member_type':'GROUP' 'member_email':'<EMAIL>'} {'group_id':'2222bbbb2' 'member_role':'OWNER' 'member_type':'GROUP' 'member_email':'<EMAIL>'} {'group_id':'1111aaaa1' 'member_role':'MEMBER' 'member_type':'USER' 'member_email':'<EMAIL>'} {'group_id':'1111aaaa1' 'member_role':'MEMBER' 'member_type':'USER' 'member_email':'<EMAIL>'} ]<line_sep>
# -*- coding: utf-8 -*- r""" Deep Learning with PyTorch ************************** **Translation**: `황성수 <https://github.com/adonisues>`_ Deep Learning Building Blocks: Affine maps, non-linearities and objectives ========================================================================== Deep learning consists of composing linearities with non-linearities in clever ways. The introduction of non-linearities allows for powerful models. In this section, we will cover these core components, make up an objective function, and see how the model is trained. Affine Maps ~~~~~~~~~~~ One of the core workhorses of deep learning is the affine map, which is a function :math:`f(x)` where .. math:: f(x) = Ax + b for a matrix :math:`A` and vectors :math:`x, b`. The parameters to be learned here are :math:`A` and :math:`b`. Often, :math:`b` is referred to as the *bias* term. PyTorch and most other deep learning frameworks do things a little differently than traditional linear algebra: they map the rows of the input instead of the columns. That is, given :math:`A`, the :math:`i`'th row of the output is the mapping of the :math:`i`'th row of the input, plus the bias term. Look at the example below. """<line_sep># Author: <NAME> <import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>torch.optim<as>optim<line_sep>torch.manual_seed(1)<line_sep>###################################################################### lin=nn.Linear(5 3)# maps from R^5 to R^3, parameters A, b # data is 2x5. A maps from 5 to 3... can we map "data" under A? data=torch.randn(2 5)<line_sep>print(lin(data))# yes ###################################################################### # Non-Linearities # ~~~~~~~~~~~~~~~ # # First, note the following fact, which explains why we need non-linearities # in the first place. Suppose we have two affine maps # :math:`f(x) = Ax + b` and :math:`g(x) = Cx + d`. What is # :math:`f(g(x))`? # # .. math:: f(g(x)) = A(Cx + d) + b = ACx + (Ad + b) # # :math:`AC` is a matrix and :math:`Ad + b` is a vector, so composing affine # maps gives you an affine map. # # From this, you can see that if you wanted your neural network to be a long # chain of affine compositions, this would add no new power to your model # compared to doing a single affine map. # # If we introduce non-linearities in between the affine layers, this is no # longer the case, and we can build much more powerful models. # # A few core non-linearities, :math:`\tanh(x), \sigma(x), \text{ReLU}(x)`, are # the most common. You are probably wondering: "why these functions? I can # think of plenty of other non-linearities." The reason is that they have # gradients that are easy to compute, and computing gradients is essential # for learning. For example # # .. math:: \frac{d\sigma}{dx} = \sigma(x)(1 - \sigma(x)) # # A quick note: although you may have learned some neural networks in your # intro to AI class where :math:`\sigma(x)` was the default non-linearity, # in practice people usually avoid it. This is because the gradient *vanishes* # very quickly as the absolute value of the argument grows, and small # gradients make learning hard. Most people default to tanh or ReLU. # # In PyTorch, most non-linearities are in torch.functional (we import it as F). # Note that non-linearities typically don't have parameters like affine maps do; # that is, they don't have weights that are updated during training. data=torch.randn(2 2)<line_sep>print(data)<line_sep>print(F.relu(data))<line_sep>###################################################################### # Softmax and Probabilities # ~~~~~~~~~~~~~~~~~~~~~~~~~ # # The function :math:`\text{Softmax}(x)` is also just a non-linearity, but # it is special in that it is usually the last operation done in a network. # This is because it takes in a vector of real numbers and returns a # probability distribution. Its definition is as follows: let :math:`x` be a # vector of real numbers (positive, negative, whatever; there are no # constraints). Then the i'th component of :math:`\text{Softmax}(x)` is # # .. math:: \frac{\exp(x_i)}{\sum_j \exp(x_j)} # # It should be clear that the output is a probability distribution: each # element is non-negative and the sum over all components is 1. # # You could also think of it as just applying an element-wise exponentiation # operator to the input to make everything non-negative, and then dividing by # the normalization constant. # # Softmax is also in torch.nn.functional. data=torch.randn(5)<line_sep>print(data)<line_sep>print(F.softmax(data dim=0))<line_sep>print(F.softmax(data dim=0).sum())# sums to 1 because it is a distribution! print(F.log_softmax(data dim=0))# there is also log_softmax ###################################################################### # Objective Functions # ~~~~~~~~~~~~~~~~~~~ # # The objective function is the function that your network is being trained # to minimize (in which case it is often called a *loss function* or *cost # function*). This proceeds by first choosing a training instance, running it # through the network, and computing the loss of the output. The parameters # of the model are then updated by taking the derivative of the loss # function. Intuitively, if your model is completely confident in its answer # and its answer is wrong, your loss will be high; if it is confident in its # answer and the answer is correct, the loss will be low. 
# # The idea behind minimizing the loss function on your training examples is # that your network will hopefully generalize well and have small loss on # unseen examples in your dev set, test set, or in production. An example # loss function is the *negative log likelihood loss*, which is a very # common objective for multi-class classification. For supervised # multi-class classification, this means training the network to minimize # the negative log probability of the correct output (or, equivalently, to # maximize the log probability of the correct output). # ###################################################################### # Optimization and Training # ========================= # # So what can we do once we can compute a loss function for an instance? We # saw earlier that Tensors know how to compute gradients with respect to the # things that were used to compute them. Since our loss is a Tensor, we can # compute gradients with respect to all of the parameters used to compute # it! Then we can perform standard gradient updates. Let :math:`\theta` be # our parameters, :math:`L(\theta)` the loss function, and :math:`\eta` a # positive learning rate. Then: # # .. math:: \theta^{(t+1)} = \theta^{(t)} - \eta \nabla_\theta L(\theta) # # There is a huge collection of algorithms and active research on doing # something more than just this vanilla gradient update. Many attempt to # vary the learning rate based on what is happening at train time. You don't # need to worry about what specifically these algorithms are doing unless # you are really interested. Torch provides many of them in the torch.optim # package, and they are all completely transparent: using the simplest # gradient update looks the same as using the more complicated algorithms. # Trying different update algorithms and different parameters for them (like # different initial learning rates) is important for optimizing your # network's performance. Often, just replacing vanilla SGD with an optimizer # like Adam or RMSprop noticeably boosts performance. # ###################################################################### # Creating Network Components in PyTorch # ========================================== # # Before we move on to our focus on NLP, let's do an annotated example of # building a network in PyTorch using only affine maps and non-linearities. # We will also see how to compute a loss function using PyTorch's built-in # negative log likelihood, and how to update parameters by backpropagation. # # All network components should inherit from nn.Module and override the # forward() method. That is about it, as far as the boilerplate is # concerned. Inheriting from nn.Module provides functionality to your # component: for example, it makes it keep track of its trainable # parameters, and you can swap it between CPU and GPU with ``.to(device)``, # where device can be the CPU device ``torch.device("cpu")`` or the CUDA # device ``torch.device("cuda:0")``. # # Let's write an annotated example of a network that takes in a sparse # bag-of-words representation and outputs a probability distribution over # two labels: "English" and "Spanish". This model is just logistic # regression. # ###################################################################### # Example: Logistic Regression Bag-of-Words classifier # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Our model will map a sparse BoW representation to log probabilities over # labels. We assign each word in the vocab an index. For example, say our # entire vocab is two words, "hello" and "world", with indices 0 and 1 # respectively. The BoW vector for the sentence "hello hello hello hello" is # # .. math:: \left[ 4, 0 \right] # # For "hello world world hello", it is # # .. math:: \left[ 2, 2 \right] # # In general, it is # # .. math:: \left[ \text{Count}(\text{hello}), \text{Count}(\text{world}) \right] # # Denote this BoW vector as :math:`x`. Then the output of our network is: # # .. math:: \log \text{Softmax}(Ax + b) # # That is, we pass the input through an affine map and then do log softmax. # data=[("me gusta comer en la cafeteria".split() "SPANISH") ("Give it to me".split() "ENGLISH") ("No creo que sea una buena idea".split() "SPANISH") ("No it is not a good idea to get lost at sea".split() "ENGLISH")]<line_sep>test_data=[("Yo creo que si".split() "SPANISH") ("it is lost on me".split() "ENGLISH")]<line_sep># word_to_ix maps each word in the vocab to a unique integer, which will be # its index in the BoW vector. word_to_ix={}<for_stmt>sent,_ data+test_data<block_start><for_stmt>word sent<block_start><if_stmt>word<not><in>word_to_ix<block_start>word_to_ix[word]=len(word_to_ix)<block_end><block_end><block_end>print(word_to_ix)<line_sep>VOCAB_SIZE=len(word_to_ix)<line_sep>NUM_LABELS=2<class_stmt>BoWClassifier(nn.Module)# inherits from nn.Module! <block_start><def_stmt>__init__ self num_labels vocab_size# calls the init function of nn.Module. 
Don't get confused by syntax; # just always do it in an nn.Module. <block_start>super(BoWClassifier self).__init__()<line_sep># Define the parameters that you will need. In this case, we need A and b, # the parameters of the affine mapping. # Torch defines nn.Linear(), which provides the affine map. # Make sure you understand why the input dimension is vocab_size and the # output is num_labels! self.linear=nn.Linear(vocab_size num_labels)<line_sep># NOTE! The non-linearity log softmax does not have parameters! # So we don't need to worry about that here. <block_end><def_stmt>forward self bow_vec# Pass the input through the linear layer, then through log_softmax. # Many non-linearities and other functions are in torch.nn.functional <block_start><return>F.log_softmax(self.linear(bow_vec) dim=1)<block_end><block_end><def_stmt>make_bow_vector sentence word_to_ix<block_start>vec=torch.zeros(len(word_to_ix))<for_stmt>word sentence<block_start>vec[word_to_ix[word]]<augadd>1<block_end><return>vec.view(1 -1)<block_end><def_stmt>make_target label label_to_ix<block_start><return>torch.LongTensor([label_to_ix[label]])<block_end>model=BoWClassifier(NUM_LABELS VOCAB_SIZE)<line_sep># The model knows its parameters. The first output below is A, the second is b. # Whenever you assign a component to a class variable in the __init__ function # of a module, which was done with the line # self.linear = nn.Linear(...) # then, through some Python magic from the PyTorch devs, your module (in this # case, BoWClassifier) stores knowledge of the nn.Linear's parameters <for_stmt>param model.parameters()<block_start>print(param)<block_end># To run the model, pass in a BoW vector. # Here we don't need to train, so the code is wrapped in torch.no_grad() <with_stmt>torch.no_grad()<block_start>sample=data[0]<line_sep>bow_vector=make_bow_vector(sample[0] word_to_ix)<line_sep>log_probs=model(bow_vector)<line_sep>print(log_probs)<block_end>###################################################################### # Which of the above values corresponds to the log probability of ENGLISH, # and which to SPANISH? We never defined it, but we need to if we want to # train the model. # label_to_ix={"SPANISH":0 "ENGLISH":1}<line_sep>###################################################################### # So let's train! To do this, we pass instances through to get log # probabilities, compute a loss function, compute the gradient of the loss # function, and then update the parameters with a gradient step. Loss # functions are provided by Torch in the nn package: nn.NLLLoss() is the # negative log likelihood loss we want. torch.optim defines optimization # functions; here we will just use SGD. # # Note that the *input* to NLLLoss is a vector of log probabilities and a # target label. It doesn't compute the log probabilities for us; this is why # the last layer of our network is log softmax. The loss function # nn.CrossEntropyLoss() is the same as NLLLoss(), except it does the log # softmax for you. # # Run on the test data before we train, just to see a before-and-after. <with_stmt>torch.no_grad()<block_start><for_stmt>instance,label test_data<block_start>bow_vec=make_bow_vector(instance word_to_ix)<line_sep>log_probs=model(bow_vec)<line_sep>print(log_probs)<block_end><block_end># Print the matrix column corresponding to "creo". print(next(model.parameters())[: word_to_ix["creo"]])<line_sep>loss_function=nn.NLLLoss()<line_sep>optimizer=optim.SGD(model.parameters() lr=0.1)<line_sep># Usually you want to pass over the training data several times. # 100 epochs is much more than needed for this tiny data set, but real data # sets have many more than two instances. Usually, somewhere between 5 and # 30 epochs is reasonable. <for_stmt>epoch range(100)<block_start><for_stmt>instance,label data# Step 1. Remember that PyTorch accumulates gradients; # we need to clear them out before each instance. <block_start>model.zero_grad()<line_sep># Step 2. Make our BoW vector and wrap the target in a tensor as an # integer. For example, if the target is SPANISH, we wrap the integer 0. # The loss function then knows that the 0th element of the log # probabilities is the log probability corresponding to SPANISH. bow_vec=make_bow_vector(instance word_to_ix)<line_sep>target=make_target(label label_to_ix)<line_sep># Step 3. Run our forward pass. log_probs=model(bow_vec)<line_sep># Step 4. Compute the loss and gradients, and update the parameters by calling optimizer.step(). 
loss=loss_function(log_probs target)<line_sep>loss.backward()<line_sep>optimizer.step()<block_end><block_end><with_stmt>torch.no_grad()<block_start><for_stmt>instance,label test_data<block_start>bow_vec=make_bow_vector(instance word_to_ix)<line_sep>log_probs=model(bow_vec)<line_sep>print(log_probs)<block_end><block_end># The index corresponding to Spanish goes up, and English goes down! print(next(model.parameters())[: word_to_ix["creo"]])<line_sep>###################################################################### # We got the right answer! You can see that the log probability for Spanish # is much higher in the first test example, and the log probability for # English is much higher in the second, as it should be. # # Now you have seen how to make a PyTorch component, pass some data through # it, and do gradient updates. We are ready to dig deeper into what deep NLP # has to offer. #
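######################################################################
# As an aside, the vanilla update rule
# :math:`\theta^{(t+1)} = \theta^{(t)} - \eta \nabla_\theta L(\theta)`
# from the optimization section is easy to write by hand. The sketch below is
# an illustration added here (not part of the original tutorial); the 0.1
# learning rate is an arbitrary choice. optim.SGD does the same bookkeeping
# (plus options like momentum and weight decay), which is why the training
# loop above only calls optimizer.step().
<with_stmt>torch.no_grad()<block_start>eta=0.1<for_stmt>param model.parameters()<block_start><if_stmt>param.grad<is><not><none><block_start>param<augsub>eta<times>param.grad<block_end><block_end><block_end>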
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.utils.data<import_from_stmt>skimage.draw circle<import_from_stmt>skimage.measure find_contours<import_from_stmt>PIL Image<class_stmt>RenderedPoseDataset(torch.utils.data.Dataset)<block_start><def_stmt>__init__ self data_root objects subset_num transform<block_start>self.transform=transform<line_sep># images image_dirs=[]<line_sep>self.object_indices=[]<for_stmt>o objects<block_start>image_dirs.append(os.path.join(data_root o 'subset_{:08}'.format(subset_num)))<block_end><for_stmt>image_dir image_dirs<block_start><assert_stmt>os.path.exists(image_dir)<block_end>self.image_paths=[]<for_stmt>i,image_dir enumerate(image_dirs)<block_start>image_names=sorted(os.listdir(image_dir))<line_sep>self.image_paths.extend([os.path.join(image_dir name)<for>name image_names])<line_sep>self.object_indices.extend(i<times>np.ones(len(image_names)))<block_end>self.object_indices=np.array(self.object_indices dtype=np.int64)<assert_stmt>len(self.object_indices)<eq>len(self.image_paths)<line_sep># poses poses_paths=[]<for_stmt>o objects<block_start>poses_paths.append(os.path.join(data_root o 'poses' 'subset_{:08}.txt'.format(subset_num)))<block_end><for_stmt>poses_path poses_paths<block_start><assert_stmt>os.path.exists(poses_path)<block_end>self.poses=[]<for_stmt>poses_path poses_paths<block_start>self.poses.extend(np.loadtxt(poses_path).astype(np.float32))<block_end><assert_stmt>len(self.poses)<eq>len(self.image_paths)<block_end><def_stmt>__getitem__ self index<block_start>object_index=self.object_indices[index]<line_sep>image=Image.open(self.image_paths[index])<line_sep>image=self.transform(image)<line_sep># enforce quaternion [w, x, y, z] to have positive w target_pose=self.poses[index]<if_stmt>target_pose[3]<l>0<block_start>target_pose[3:]=-target_pose[3:]<block_end><return>image target_pose object_index<block_end><def_stmt>__len__ self<block_start><return>len(self.image_paths)<block_end><block_end><class_stmt>OccludedRenderedPoseDataset(torch.utils.data.Dataset)<block_start><def_stmt>__init__ self data_root objects subset_num transform max_circle_size<block_start>self.transform=transform<line_sep>self.max_circle_size=max_circle_size<line_sep># images image_dirs=[]<line_sep>self.object_indices=[]<for_stmt>o objects<block_start>image_dirs.append(os.path.join(data_root o 'subset_{:08}'.format(subset_num)))<block_end><for_stmt>image_dir image_dirs<block_start><assert_stmt>os.path.exists(image_dir)<block_end>self.image_paths=[]<for_stmt>i,image_dir enumerate(image_dirs)<block_start>image_names=sorted(os.listdir(image_dir))<line_sep>self.image_paths.extend([os.path.join(image_dir name)<for>name image_names])<line_sep>self.object_indices.extend(i<times>np.ones(len(image_names)))<block_end>self.object_indices=np.array(self.object_indices dtype=np.int64)<assert_stmt>len(self.object_indices)<eq>len(self.image_paths)<line_sep># poses poses_paths=[]<for_stmt>o objects<block_start>poses_paths.append(os.path.join(data_root o 'poses' 'subset_{:08}.txt'.format(subset_num)))<block_end><for_stmt>poses_path poses_paths<block_start><assert_stmt>os.path.exists(poses_path)<block_end>self.poses=[]<for_stmt>poses_path poses_paths<block_start>self.poses.extend(np.loadtxt(poses_path).astype(np.float32))<block_end><assert_stmt>len(self.poses)<eq>len(self.image_paths)<block_end><def_stmt>__getitem__ self index<block_start>object_index=self.object_indices[index]<line_sep>image=Image.open(self.image_paths[index])<line_sep># if possible, occlude the object 
np_image=np.array(image)<line_sep>contours=find_contours(np_image.mean(axis=2)<if>np_image.ndim<eq>3<else>np_image 0)<if_stmt>len(contours)<g>0<block_start>contour=sorted(contours key=<lambda>x:-x.shape[0])[0]<if_stmt>len(contour)<g>0<block_start>occluded_image=np_image.copy()<line_sep>circle_center=contour[np.random.choice(len(contour))]<line_sep>r,c=circle_center<line_sep>circle_size=np.random.randint(self.max_circle_size+1)<line_sep>rr,cc=circle(r c circle_size shape=np_image.shape)<line_sep>occluded_image[rr cc]=0<line_sep>image=Image.fromarray(occluded_image)<block_end><block_end>image=self.transform(image)<line_sep># enforce quaternion [w, x, y, z] to have positive w target_pose=self.poses[index]<if_stmt>target_pose[3]<l>0<block_start>target_pose[3:]=-target_pose[3:]<block_end><return>image target_pose object_index<block_end><def_stmt>__len__ self<block_start><return>len(self.image_paths)<block_end><block_end>
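# A minimal consumption sketch (the data root, object name, and subset number
# below are hypothetical) showing how these datasets plug into a standard
# DataLoader; each batch yields images, target poses, and object indices.
<if_stmt>__name__<eq>'__main__'<block_start><import_stmt>torchvision.transforms<as>T<line_sep>transform=T.Compose([T.Resize((224 224)) T.ToTensor()])<line_sep>dataset=RenderedPoseDataset('data/renders' ['mug'] 0 transform)<line_sep>loader=torch.utils.data.DataLoader(dataset batch_size=32 shuffle=<true>)<for_stmt>images,poses,object_indices loader<block_start>print(images.shape poses.shape object_indices.shape)<line_sep><break><block_end><block_end>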
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for custom SQLAlchemy types via Magnum DB."""<import_from_stmt>oslo_db exception<as>db_exc<import_from_stmt>oslo_utils uuidutils<import_stmt>magnum.db.sqlalchemy.api<as>sa_api<import_from_stmt>magnum.db.sqlalchemy models<import_from_stmt>magnum.tests.unit.db base<class_stmt>SqlAlchemyCustomTypesTestCase(base.DbTestCase)<block_start><def_stmt>test_JSONEncodedDict_default_value self# Create ClusterTemplate w/o labels <block_start>cluster_template1_id=uuidutils.generate_uuid()<line_sep>self.dbapi.create_cluster_template({'uuid':cluster_template1_id})<line_sep>cluster_template1=sa_api.model_query(models.ClusterTemplate).filter_by(uuid=cluster_template1_id).one()<line_sep>self.assertEqual({} cluster_template1.labels)<line_sep># Create ClusterTemplate with labels cluster_template2_id=uuidutils.generate_uuid()<line_sep>self.dbapi.create_cluster_template({'uuid':cluster_template2_id 'labels':{'bar':'foo'}})<line_sep>cluster_template2=sa_api.model_query(models.ClusterTemplate).filter_by(uuid=cluster_template2_id).one()<line_sep>self.assertEqual('foo' cluster_template2.labels['bar'])<block_end><def_stmt>test_JSONEncodedDict_type_check self<block_start>self.assertRaises(db_exc.DBError self.dbapi.create_cluster_template {'labels':['this is not a dict']})<block_end><def_stmt>test_JSONEncodedList_default_value self# Create nodegroup w/o node_addresses <block_start>nodegroup1_id=uuidutils.generate_uuid()<line_sep>self.dbapi.create_nodegroup({'uuid':nodegroup1_id})<line_sep>nodegroup1=sa_api.model_query(models.NodeGroup).filter_by(uuid=nodegroup1_id).one()<line_sep>self.assertEqual([] nodegroup1.node_addresses)<line_sep># Create nodegroup with node_addresses nodegroup2_id=uuidutils.generate_uuid()<line_sep>self.dbapi.create_nodegroup({'uuid':nodegroup2_id 'node_addresses':['mynode_address1' 'mynode_address2']})<line_sep>nodegroup2=sa_api.model_query(models.NodeGroup).filter_by(uuid=nodegroup2_id).one()<line_sep>self.assertEqual(['mynode_address1' 'mynode_address2'] nodegroup2.node_addresses)<block_end><def_stmt>test_JSONEncodedList_type_check self<block_start>self.assertRaises(db_exc.DBError self.dbapi.create_nodegroup {'node_addresses':{'this is not a list':'test'}})<block_end><block_end>
<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>..registry LOSSES<line_sep>@LOSSES.register_module<class_stmt>CosineEmbeddingLoss(nn.Module)<block_start><def_stmt>__init__ self margin=0. size_average=<none> reduce=<none> reduction='mean'<block_start>super(CosineEmbeddingLoss self).__init__()<line_sep># size_average and reduce mirror torch's deprecated loss arguments and are # intentionally ignored here; reduction supersedes them. self.margin=margin<line_sep>self.reduction=reduction<block_end><def_stmt>forward self input1 input2 target<block_start><return>F.cosine_embedding_loss(input1 input2 target margin=self.margin reduction=self.reduction)<block_end><block_end>
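# A quick usage sketch (shapes and values are illustrative; assumes the
# module is imported through its package so the relative registry import
# resolves). target is +1 for pairs that should be similar and -1 for
# dissimilar pairs:
#
#     loss_fn = CosineEmbeddingLoss(margin=0.1)
#     emb1, emb2 = torch.randn(4, 128), torch.randn(4, 128)
#     target = torch.tensor([1., -1., 1., -1.])
#     loss = loss_fn(emb1, emb2, target)  # scalar, since reduction='mean'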
# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ <import_from_future_stmt> print_function<import_stmt>getpass<import_stmt>os<import_stmt>sys<import_from_stmt>sawtooth_signing create_context<import_from_stmt>sawtooth_cli.exceptions CliException<def_stmt>add_keygen_parser subparsers parent_parser<block_start>parser=subparsers.add_parser('keygen' help='Creates user signing keys' description='Generates keys with which the user can sign '<concat>'transactions and batches.' epilog='The private and public key files are stored in '<concat>'<key-dir>/<key-name>.priv and <key-dir>/<key-name>.pub. '<concat>'<key-dir> defaults to ~/.sawtooth and <key-name> defaults to $USER.' parents=[parent_parser])<line_sep>parser.add_argument('key_name' help='specify the name of the key to create' nargs='?')<line_sep>parser.add_argument('--key-dir' help="specify the directory for the key files")<line_sep>parser.add_argument('--force' help="overwrite files if they exist" action='store_true')<line_sep>parser.add_argument('-q' '--quiet' help="do not display output" action='store_true')<block_end><def_stmt>do_keygen args<block_start><if_stmt>args.key_name<is><not><none><block_start>key_name=args.key_name<block_end><else_stmt><block_start>key_name=getpass.getuser()<block_end><if_stmt>args.key_dir<is><not><none><block_start>key_dir=args.key_dir<if_stmt><not>os.path.exists(key_dir)<block_start><raise>CliException('no such directory: {}'.format(key_dir))<block_end><block_end><else_stmt><block_start>key_dir=os.path.join(os.path.expanduser('~') '.sawtooth' 'keys')<if_stmt><not>os.path.exists(key_dir)<block_start><if_stmt><not>args.quiet<block_start>print('creating key directory: {}'.format(key_dir))<block_end><try_stmt><block_start>os.makedirs(key_dir 0o755)<block_end><except_stmt>IOError<as>e<block_start><raise>CliException('IOError: {}'.format(str(e)))<from>e<block_end><block_end><block_end>priv_filename=os.path.join(key_dir key_name+'.priv')<line_sep>pub_filename=os.path.join(key_dir key_name+'.pub')<if_stmt><not>args.force<block_start>file_exists=<false><for_stmt>filename [priv_filename pub_filename]<block_start><if_stmt>os.path.exists(filename)<block_start>file_exists=<true><line_sep>print('file exists: {}'.format(filename) file=sys.stderr)<block_end><block_end><if_stmt>file_exists<block_start><raise>CliException('files exist, rerun with --force to overwrite existing files')<block_end><block_end>context=create_context('secp256k1')<line_sep>private_key=context.new_random_private_key()<line_sep>public_key=context.get_public_key(private_key)<try_stmt><block_start>priv_exists=os.path.exists(priv_filename)<with_stmt>open(priv_filename 'w')<as>priv_fd<block_start><if_stmt><not>args.quiet<block_start><if_stmt>priv_exists<block_start>print('overwriting file: {}'.format(priv_filename))<block_end><else_stmt><block_start>print('writing file: 
{}'.format(priv_filename))<block_end><block_end>priv_fd.write(private_key.as_hex())<line_sep>priv_fd.write('\n')<line_sep># Set the private key u+rw g+r os.chmod(priv_filename 0o640)<block_end>pub_exists=os.path.exists(pub_filename)<with_stmt>open(pub_filename 'w')<as>pub_fd<block_start><if_stmt><not>args.quiet<block_start><if_stmt>pub_exists<block_start>print('overwriting file: {}'.format(pub_filename))<block_end><else_stmt><block_start>print('writing file: {}'.format(pub_filename))<block_end><block_end>pub_fd.write(public_key.as_hex())<line_sep>pub_fd.write('\n')<line_sep># Set the public key u+rw g+r o+r os.chmod(pub_filename 0o644)<block_end><block_end><except_stmt>IOError<as>ioe<block_start><raise>CliException('IOError: {}'.format(str(ioe)))<from>ioe<block_end><block_end>
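# A minimal wiring sketch (hypothetical; the real sawtooth CLI assembles its
# subcommands elsewhere) showing how the two functions above fit together:
#
#     import argparse
#     parent = argparse.ArgumentParser(add_help=False)
#     parser = argparse.ArgumentParser()
#     subparsers = parser.add_subparsers()
#     add_keygen_parser(subparsers, parent)
#     do_keygen(parser.parse_args(['keygen', 'validator', '--force']))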
# TestObjCIvarDiscovery.py # # This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See https://swift.org/LICENSE.txt for license information # See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors # # ------------------------------------------------------------------------------ """ Test that we can correctly see ivars from the Objective-C runtime """<import_stmt>lldb<import_from_stmt>lldbsuite.test.lldbtest *<import_from_stmt>lldbsuite.test.decorators *<import_stmt>lldbsuite.test.lldbutil<as>lldbutil<import_stmt>os<import_stmt>os.path<import_stmt>re<import_stmt>unittest2<import_stmt>shutil<class_stmt>TestObjCIVarDiscovery(TestBase)<block_start>mydir=TestBase.compute_mydir(__file__)<line_sep>@skipUnlessDarwin@skipIf(debug_info=no_match("dsym"))<def_stmt>test_nodbg self<block_start>self.build()<line_sep>shutil.rmtree(self.getBuildArtifact("aTestFramework.framework/Versions/A/aTestFramework.dSYM"))<line_sep>self.do_test(<false>)<block_end>@skipUnlessDarwin@skipIf(debug_info=no_match("dsym"))<def_stmt>test_dbg self<block_start>self.build()<line_sep>self.do_test(<true>)<block_end><def_stmt>prepare_value self value<block_start>value.SetPreferDynamicValue(lldb.eDynamicCanRunTarget)<line_sep>value.SetPreferSyntheticValue(<true>)<line_sep><return>value<block_end><def_stmt>do_test self dbg<block_start>"""Test that we can correctly see ivars from the Objective-C runtime"""<line_sep>target=self.dbg.CreateTarget(self.getBuildArtifact("a.out"))<line_sep>self.assertTrue(target VALID_TARGET)<line_sep>#self.registerSharedLibrariesWithTarget(target, ['aTestFramework.framework/aTestFramework']) <if_stmt>lldb.remote_platform<block_start>wd=lldb.remote_platform.GetWorkingDirectory()<line_sep>directory='aTestFramework.framework/Versions/A/'<line_sep>filename=directory+'/aTestFramework'<line_sep>cur_dir=wd<for_stmt>d directory.split('/')<block_start>err=lldb.remote_platform.MakeDirectory(os.path.join(cur_dir d))<line_sep>self.assertFalse(err.Fail() 'Failed to mkdir '+d+':'+str(err))<line_sep>cur_dir=os.path.join(cur_dir d)<block_end>err=lldb.remote_platform.Put(lldb.SBFileSpec(self.getBuildArtifact(filename)) lldb.SBFileSpec(os.path.join(wd filename)))<line_sep>self.assertFalse(err.Fail() 'Failed to copy '+filename+':'+str(err))<block_end># Launch the process, and do not stop at the entry point. 
lldbutil.run_to_source_breakpoint(self 'Set breakpoint here' lldb.SBFileSpec('main.swift'))<if_stmt>dbg<block_start>self.expect("image list" "Contents/Resources/DWARF/aTestFramework")<block_end><else_stmt><block_start>self.expect("image list" "Contents/Resources/DWARF/aTestFramework" matching=<false>)<block_end>self.runCmd("frame variable -d run --show-types --ptr-depth=1")<line_sep>obj=self.prepare_value(self.frame().FindVariable("object"))<line_sep>mysubclass=self.prepare_value(obj.GetChildAtIndex(0))<line_sep>myclass=self.prepare_value(mysubclass.GetChildAtIndex(0))<line_sep>m_pair=myclass.GetChildMemberWithName("m_pair")<line_sep>m_pair_A=m_pair.GetChildMemberWithName("A")<line_sep>m_pair_B=m_pair.GetChildMemberWithName("B")<line_sep>self.assertEqual(m_pair_A.GetValueAsUnsigned() 1)<line_sep>self.assertEqual(m_pair_B.GetValueAsUnsigned() 2)<line_sep>m_derived=self.prepare_value(myclass.GetChildMemberWithName("m_base"))<line_sep>m_derivedX=m_derived.GetChildMemberWithName("m_DerivedX")<line_sep>self.assertEqual(m_derivedX.GetValueAsUnsigned() 1)<line_sep>m_numbers=self.prepare_value(myclass.GetChildMemberWithName("m_myclass_numbers"))<line_sep>self.assertTrue(m_numbers.GetSummary()<eq>'3 elements' "m_myclass_numbers != 3 elements")<line_sep>m_subclass_ivar=mysubclass.GetChildMemberWithName("m_subclass_ivar")<line_sep>self.assertTrue(m_subclass_ivar.GetValueAsUnsigned()<eq>42 "m_subclass_ivar != 42")<line_sep>m_mysubclass_s=mysubclass.GetChildMemberWithName("m_mysubclass_s")<line_sep>self.assertTrue(m_mysubclass_s.GetSummary()<eq>'"an NSString here"' 'm_subclass_s != "an NSString here"')<line_sep>swiftivar=obj.GetChildMemberWithName("swiftivar")<line_sep>self.assertTrue(swiftivar.GetSummary()<eq>'"Hey Swift!"' "swiftivar != Hey Swift")<line_sep>silly=self.prepare_value(obj.GetChildMemberWithName("silly"))<line_sep>silly_x=silly.GetChildMemberWithName("x")<line_sep>silly_url=silly.GetChildMemberWithName("url")<line_sep>self.assertTrue(silly_x.GetValueAsUnsigned()<eq>12 "x != 12")<line_sep>self.assertTrue(silly_url.GetSummary()<eq>'"http://www.apple.com"' "url != apple.com")<block_end><block_end>
# coding=utf-8 # -------------------------------------------------------------------------- # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- <import_from_stmt>msrest.serialization Model<class_stmt>JobRequest(Model)<block_start>"""JobRequest. :param job_id: Job identifier :type job_id: str :param type: Required. The type of job to execute. Possible values include: 'unknown', 'export', 'import', 'backup', 'readDeviceProperties', 'writeDeviceProperties', 'updateDeviceConfiguration', 'rebootDevice', 'factoryResetDevice', 'firmwareUpdate', 'scheduleDeviceMethod', 'scheduleUpdateTwin', 'restoreFromBackup', 'failoverDataCopy' :type type: str or ~service20180630.models.enum :param cloud_to_device_method: Required if jobType is cloudToDeviceMethod. The method type and parameters. :type cloud_to_device_method: ~service20180630.models.CloudToDeviceMethod :param update_twin: :type update_twin: ~service20180630.models.Twin :param query_condition: Required if jobType is updateTwin or cloudToDeviceMethod. Condition for device query to get devices to execute the job on :type query_condition: str :param start_time: ISO 8601 date time to start the job :type start_time: datetime :param max_execution_time_in_seconds: Max execution time in seconds (ttl duration) :type max_execution_time_in_seconds: long """<line_sep>_attribute_map={"job_id":{"key":"jobId" "type":"str"} "type":{"key":"type" "type":"str"} "cloud_to_device_method":{"key":"cloudToDeviceMethod" "type":"CloudToDeviceMethod"} "update_twin":{"key":"updateTwin" "type":"Twin"} "query_condition":{"key":"queryCondition" "type":"str"} "start_time":{"key":"startTime" "type":"iso-8601"} "max_execution_time_in_seconds":{"key":"maxExecutionTimeInSeconds" "type":"long"} }<def_stmt>__init__ self job_id=<none> type=<none> cloud_to_device_method=<none> update_twin=<none> query_condition=<none> start_time=<none> max_execution_time_in_seconds=<none> <block_start>super(JobRequest self).__init__()<line_sep>self.job_id=job_id<line_sep>self.type=type<line_sep>self.cloud_to_device_method=cloud_to_device_method<line_sep>self.update_twin=update_twin<line_sep>self.query_condition=query_condition<line_sep>self.start_time=start_time<line_sep>self.max_execution_time_in_seconds=max_execution_time_in_seconds<block_end><block_end>
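# A construction sketch (the identifiers and query below are made up for
# illustration); the _attribute_map above drives msrest (de)serialization,
# so job_id round-trips through the wire name 'jobId':
#
#     req = JobRequest(
#         job_id='job-0001',
#         type='rebootDevice',
#         query_condition="deviceId IN ['device-1']",
#         max_execution_time_in_seconds=3600)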
# Copyright (c) OpenMMLab. All rights reserved. <import_stmt>argparse<import_stmt>cv2<import_stmt>torch<import_from_stmt>mmdet.apis inference_detector init_detector<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description='MMDetection webcam demo')<line_sep>parser.add_argument('config' help='test config file path')<line_sep>parser.add_argument('checkpoint' help='checkpoint file')<line_sep>parser.add_argument('--device' type=str default='cuda:0' help='CPU/CUDA device option')<line_sep>parser.add_argument('--camera-id' type=int default=0 help='camera device id')<line_sep>parser.add_argument('--score-thr' type=float default=0.5 help='bbox score threshold')<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><def_stmt>main <block_start>args=parse_args()<line_sep>device=torch.device(args.device)<line_sep>model=init_detector(args.config args.checkpoint device=device)<line_sep>camera=cv2.VideoCapture(args.camera_id)<line_sep>print('Press "Esc", "q" or "Q" to exit.')<while_stmt><true><block_start>ret_val,img=camera.read()<line_sep># stop cleanly if the camera yields no frame <if_stmt><not>ret_val<block_start><break><block_end>result=inference_detector(model img)<line_sep>ch=cv2.waitKey(1)<if_stmt>ch<eq>27<or>ch<eq>ord('q')<or>ch<eq>ord('Q')<block_start><break><block_end>model.show_result(img result score_thr=args.score_thr wait_time=1 show=<true>)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
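# Example invocation (paths are placeholders; assumes this file is saved as
# webcam_demo.py):
#
#     python webcam_demo.py configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#         checkpoints/faster_rcnn_r50_fpn_1x_coco.pth --score-thr 0.6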
<def_stmt>reverse_vowel s<block_start>vowels="AEIOUaeiou"<line_sep>i,j=0 len(s)-1<line_sep>s=list(s)<while_stmt>i<l>j<block_start><while_stmt>i<l>j<and>s[i]<not><in>vowels<block_start>i<augadd>1<block_end><while_stmt>i<l>j<and>s[j]<not><in>vowels<block_start>j<augsub>1<block_end>s[i],s[j]=s[j] s[i]<line_sep>i,j=i+1 j-1<block_end><return>"".join(s)<block_end>
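# A quick check of the two-pointer swap (expected outputs shown inline):
<if_stmt>__name__<eq>"__main__"<block_start>print(reverse_vowel("hello"))# "holle" <line_sep>print(reverse_vowel("leetcode"))# "leotcede" <block_end>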
# coding=utf-8 # @Author : bamtercelboo # @Datetime : 2018/07/19 22:35 # @File : handle_wordEmbedding2File.py # @Last Modify Time : 2018/07/19 22:35 # @Contact : <EMAIL> """ handle external word embedding to file """<import_stmt>os<import_stmt>tqdm<class_stmt>WordEmbedding2File<block_start><def_stmt>__init__ self wordEmbedding_path data_path extract_path<block_start>print("handling external word embedding to file")<line_sep>self.wordEmbedding_path=wordEmbedding_path<line_sep>self.data_path=data_path<line_sep>self.extract_path=extract_path<line_sep>self.data_dict=self.read_data(data_path)<line_sep>self.extract_dict={}<line_sep>self.dim=100<line_sep>self.read_vectors(self.wordEmbedding_path)<line_sep>self.write(self.extract_path self.extract_dict)<line_sep># print(self.data_dict) # print(self.extract_dict) <block_end><def_stmt>read_data self path<block_start>print("read data file {}".format(path))<line_sep>data_list=[]<with_stmt>open(path encoding="UTF-8")<as>f<block_start><for_stmt>line f.readlines()<block_start>line=line.strip("\n").split(" ")[:-2]<line_sep>data_list.extend(line)<block_end><block_end><return>set(data_list)<block_end><def_stmt>read_vectors self path<block_start>print("read embedding path {}".format(path))<with_stmt>open(path encoding='utf-8')<as>f<block_start>lines=f.readlines()<line_sep># infer the embedding dimension from a data line: the leading field is the # word and the trailing field is dropped (the file apparently ends each # line with a space) self.dim=len(lines[2].strip("\n").strip().split(" ")[1:-1])<line_sep>lines=tqdm.tqdm(lines)<for_stmt>line lines<block_start>values=line.strip("\n").strip().split(" ")<if_stmt>len(values)<eq>1<or>len(values)<eq>2<or>len(values)<eq>3<block_start><continue><block_end>word,vector=values[0] values[1:-1]<if_stmt>word<in>self.data_dict<block_start>self.extract_dict[word]=vector<block_end><block_end><block_end><block_end><def_stmt>write self path word_dict<block_start>print("writing to {}".format(path))<if_stmt>os.path.exists(path)<block_start>os.remove(path)<block_end>all_words,dim=len(word_dict) self.dim<line_sep>print(all_words dim)<with_stmt>open(path encoding="UTF-8" mode="w")<as>out_file<block_start>out_file.write(str(all_words)+" "+str(dim)+"\n")<for_stmt>word word_dict<block_start>value=" ".join(word_dict[word])<line_sep>out_file.write(word+" "+value+"\n")<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>wordEmbedding_path="GoogleNews_wordEmbedding/vectors.utf-8"<line_sep>data_path="./sst_all.txt"<line_sep>extract_path="./extract_googleNews_embed_sst.txt"<line_sep>WordEmbedding2File(wordEmbedding_path=wordEmbedding_path data_path=data_path extract_path=extract_path)<block_end>
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE <import_stmt>os<def_stmt>configuration parent_package='' top_path=<none><block_start><import_from_stmt>numpy.distutils.misc_util Configuration<line_sep>config=Configuration('megaman' parent_package top_path)<line_sep>config.add_subpackage('__check_build')<line_sep>config.add_subpackage('datasets')<line_sep>config.add_subpackage('embedding')<line_sep>config.add_subpackage('embedding/tests')<line_sep>config.add_subpackage('geometry')<line_sep>config.add_subpackage('geometry/cyflann')<line_sep>config.add_subpackage('geometry/tests')<line_sep>config.add_subpackage('plotter')<line_sep>config.add_subpackage('relaxation')<line_sep>config.add_subpackage('relaxation/tests')<line_sep>config.add_subpackage('utils')<line_sep>config.add_subpackage('utils/tests')<line_sep>config.add_data_files('geometry/tests/testmegaman_laplacian_rad0_2_lam1_5_n200.mat')<line_sep>config.add_data_files('relaxation/tests/eps_halfdome.mat')<line_sep>config.add_data_files('relaxation/tests/rloss_halfdome.mat')<line_sep>config.add_data_files('datasets/megaman.png')<line_sep><return>config<block_end><if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>numpy.distutils.core setup<line_sep>setup(**configuration(top_path='').todict())<block_end>
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # vim: tabstop=4 shiftwidth=4 softtabstop=4 <import_stmt>unittest<import_stmt>logging<import_from_stmt>nose.tools *<import_from_stmt>ryu.ofproto.ofproto_v1_0_parser *<import_from_stmt>ryu.ofproto ofproto_v1_0_parser<import_from_stmt>ryu.lib addrconv<line_sep>LOG=logging.getLogger('test_ofproto_v10')<class_stmt>TestOFPPhyPort(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPPhyPort """<line_sep># OFP_PHY_PORT_PACK_STR # '!H6s16sIIIIII'... port_no, hw_addr, name, config, state # curr, advertised, supported, peer port_no={'buf':'\xe7\x6b' 'val':59243}<line_sep>hw_addr='52:54:54:10:20:99'<line_sep>name='name'.ljust(16)<line_sep>config={'buf':'\x84\xb6\x8c\x53' 'val':2226555987}<line_sep>state={'buf':'\x64\x07\xfb\xc9' 'val':1678244809}<line_sep>curr={'buf':'\xa9\xe8\x0a\x2b' 'val':2850556459}<line_sep>advertised={'buf':'\x78\xb9\x7b\x72' 'val':2025421682}<line_sep>supported={'buf':'\x7e\x65\x68\xad' 'val':2120575149}<line_sep>peer={'buf':'\xa4\x5b\x8b\xed' 'val':2757463021}<line_sep>buf=port_no['buf']+addrconv.mac.text_to_bin(hw_addr)+name+config['buf']+state['buf']+curr['buf']+advertised['buf']+supported['buf']+peer['buf']<line_sep>c=OFPPhyPort(port_no['val'] hw_addr name config['val'] state['val'] curr['val'] advertised['val'] supported['val'] peer['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.port_no['val'] self.c.port_no)<line_sep>eq_(self.hw_addr self.c.hw_addr)<line_sep>eq_(self.name self.c.name)<line_sep>eq_(self.config['val'] self.c.config)<line_sep>eq_(self.state['val'] self.c.state)<line_sep>eq_(self.curr['val'] self.c.curr)<line_sep>eq_(self.advertised['val'] self.c.advertised)<line_sep>eq_(self.supported['val'] self.c.supported)<line_sep>eq_(self.peer['val'] self.c.peer)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.port_no['val'] res.port_no)<line_sep>eq_(self.hw_addr res.hw_addr)<line_sep>eq_(self.name res.name)<line_sep>eq_(self.config['val'] res.config)<line_sep>eq_(self.state['val'] res.state)<line_sep>eq_(self.curr['val'] res.curr)<line_sep>eq_(self.advertised['val'] res.advertised)<line_sep>eq_(self.supported['val'] res.supported)<line_sep>eq_(self.peer['val'] res.peer)<block_end><block_end><class_stmt>TestOFPMatch(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPMatch """<line_sep># OFP_MATCH_PACK_STR # '!IH6s6sHBxHBB2xIIHH'...wildcards, in_port, dl_src, dl_dst, dl_vlan, # dl_vlan_pcp, dl_type, nw_tos, nw_proto, # nw_src, nw_dst, tp_src, tp_dst wildcards={'buf':'\xd2\x71\x25\x23' 'val':3530630435}<line_sep>in_port={'buf':'\x37\x8b' 'val':14219}<line_sep>dl_src='\x52\x54\x54\x10\x20\x99'<line_sep>dl_dst='\x61\x31\x50\x6d\xc9\xe5'<line_sep>dl_vlan={'buf':'\xc1\xf9' 'val':49657}<line_sep>dl_vlan_pcp={'buf':'\x79' 
'val':121}<line_sep>zfill0='\x00'<line_sep>dl_type={'buf':'\xa6\x9e' 'val':42654}<line_sep>nw_tos={'buf':'\xde' 'val':222}<line_sep>nw_proto={'buf':'\xe5' 'val':229}<line_sep>zfil11='\x00'<times>2<line_sep>nw_src={'buf':'\x1b\x6d\x8d\x4b' 'val':460164427}<line_sep>nw_dst={'buf':'\xab\x25\xe1\x20' 'val':2871386400}<line_sep>tp_src={'buf':'\xd5\xc3' 'val':54723}<line_sep>tp_dst={'buf':'\x78\xb9' 'val':30905}<line_sep>buf=wildcards['buf']+in_port['buf']+dl_src+dl_dst+dl_vlan['buf']+dl_vlan_pcp['buf']+zfill0+dl_type['buf']+nw_tos['buf']+nw_proto['buf']+zfil11+nw_src['buf']+nw_dst['buf']+tp_src['buf']+tp_dst['buf']<def_stmt>_get_obj self dl_src dl_dst<block_start>c=OFPMatch(self.wildcards['val'] self.in_port['val'] dl_src dl_dst self.dl_vlan['val'] self.dl_vlan_pcp['val'] self.dl_type['val'] self.nw_tos['val'] self.nw_proto['val'] self.nw_src['val'] self.nw_dst['val'] self.tp_src['val'] self.tp_dst['val'])<line_sep><return>c<block_end><def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>c=self._get_obj(self.dl_src self.dl_dst)<line_sep>eq_(self.wildcards['val'] c.wildcards)<line_sep>eq_(self.in_port['val'] c.in_port)<line_sep>eq_(self.dl_src c.dl_src)<line_sep>eq_(self.dl_dst c.dl_dst)<line_sep>eq_(self.dl_vlan['val'] c.dl_vlan)<line_sep>eq_(self.dl_vlan_pcp['val'] c.dl_vlan_pcp)<line_sep>eq_(self.dl_type['val'] c.dl_type)<line_sep>eq_(self.nw_tos['val'] c.nw_tos)<line_sep>eq_(self.nw_proto['val'] c.nw_proto)<line_sep>eq_(self.nw_src['val'] c.nw_src)<line_sep>eq_(self.nw_dst['val'] c.nw_dst)<line_sep>eq_(self.tp_src['val'] c.tp_src)<line_sep>eq_(self.tp_dst['val'] c.tp_dst)<block_end><def_stmt>test_init_zero self<block_start>c=self._get_obj(0 0)<line_sep>eq_(mac.DONTCARE c.dl_src)<line_sep>eq_(mac.DONTCARE c.dl_dst)<block_end><def_stmt>test_parse self<block_start>c=self._get_obj(self.dl_src self.dl_dst)<line_sep>res=c.parse(self.buf 0)<line_sep>eq_(self.wildcards['val'] res.wildcards)<line_sep>eq_(self.in_port['val'] res.in_port)<line_sep>eq_(self.dl_src res.dl_src)<line_sep>eq_(self.dl_dst res.dl_dst)<line_sep>eq_(self.dl_vlan['val'] res.dl_vlan)<line_sep>eq_(self.dl_vlan_pcp['val'] res.dl_vlan_pcp)<line_sep>eq_(self.dl_type['val'] res.dl_type)<line_sep>eq_(self.nw_tos['val'] res.nw_tos)<line_sep>eq_(self.nw_proto['val'] res.nw_proto)<line_sep>eq_(self.nw_src['val'] res.nw_src)<line_sep>eq_(self.nw_dst['val'] res.nw_dst)<line_sep>eq_(self.tp_src['val'] res.tp_src)<line_sep>eq_(self.tp_dst['val'] res.tp_dst)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>c=self._get_obj(self.dl_src self.dl_dst)<line_sep>c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_MATCH_PACK_STR<line_sep>res=struct.unpack_from(fmt buffer(buf))<line_sep>eq_(self.wildcards['val'] res[0])<line_sep>eq_(self.in_port['val'] res[1])<line_sep>eq_(self.dl_src res[2])<line_sep>eq_(self.dl_dst res[3])<line_sep>eq_(self.dl_vlan['val'] res[4])<line_sep>eq_(self.dl_vlan_pcp['val'] res[5])<line_sep>eq_(self.dl_type['val'] res[6])<line_sep>eq_(self.nw_tos['val'] res[7])<line_sep>eq_(self.nw_proto['val'] res[8])<line_sep>eq_(self.nw_src['val'] res[9])<line_sep>eq_(self.nw_dst['val'] res[10])<line_sep>eq_(self.tp_src['val'] res[11])<line_sep>eq_(self.tp_dst['val'] res[12])<block_end><block_end><class_stmt>TestOFPActionHeader(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionHeader """<line_sep># OFP_ACTION_HEADER_PACK_STR # '!HH4x'...type, len, zfill type={'buf':'\x00\x02' 
'val':ofproto.OFPAT_SET_VLAN_PCP}<line_sep>len={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_HEADER_SIZE}<line_sep>zfill='\x00'<times>4<line_sep>buf=type['buf']+len['buf']+zfill<line_sep>c=OFPActionHeader(type['val'] len['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type['val'] self.c.type)<line_sep>eq_(self.len['val'] self.c.len)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_HEADER_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type['val'] res[0])<line_sep>eq_(self.len['val'] res[1])<block_end><block_end><class_stmt>TestOFPActionOutput(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionOutput """<line_sep># OFP_ACTION_OUTPUT_PACK_STR # '!HHHH'...type, len, port, max_len type_={'buf':'\x00\x00' 'val':ofproto.OFPAT_OUTPUT}<line_sep>len_={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_OUTPUT_SIZE}<line_sep>port={'buf':'\x19\xce' 'val':6606}<line_sep>max_len={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_OUTPUT_SIZE}<line_sep>buf=type_['buf']+len_['buf']+port['buf']+max_len['buf']<line_sep>c=OFPActionOutput(port['val'] max_len['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.port['val'] self.c.port)<line_sep>eq_(self.max_len['val'] self.c.max_len)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.port['val'] res.port)<line_sep>eq_(self.max_len['val'] res.max_len)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x01' 'val':1}<line_sep>buf=type_['buf']+self.len_['buf']+self.port['buf']+self.max_len['buf']<line_sep>self.c.parser(buf 0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x07' 'val':7}<line_sep>buf=self.type_['buf']+len_['buf']+self.port['buf']+self.max_len['buf']<line_sep>self.c.parser(buf 0)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_OUTPUT_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.port['val'] res[2])<line_sep>eq_(self.max_len['val'] res[3])<block_end><block_end><class_stmt>TestOFPActionVlanVid(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionVlanVid """<line_sep># OFP_ACTION_VLAN_VID_PACK_STR # '!HHH2x'...type, len, vlan_vid, zfill type_={'buf':'\x00\x01' 'val':ofproto.OFPAT_SET_VLAN_VID}<line_sep>len_={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_VLAN_VID_SIZE}<line_sep>vlan_vid={'buf':'\x3c\x0e' 'val':15374}<line_sep>zfill='\x00'<times>2<line_sep>buf=type_['buf']+len_['buf']+vlan_vid['buf']+zfill<line_sep>c=OFPActionVlanVid(vlan_vid['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.vlan_vid['val'] self.c.vlan_vid)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.vlan_vid['val'] res.vlan_vid)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x02' 'val':2}<line_sep>buf=type_['buf']+self.len_['buf']+self.vlan_vid['buf']+self.zfill<line_sep>self.c.parser(buf 
0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x07' 'val':7}<line_sep>buf=self.type_['buf']+len_['buf']+self.vlan_vid['buf']+self.zfill<line_sep>self.c.parser(buf 0)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_VLAN_VID_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vlan_vid['val'] res[2])<block_end><block_end><class_stmt>TestOFPActionVlanPcp(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionVlanPcp """<line_sep># OFP_ACTION_VLAN_PCP_PACK_STR # '!HHB3x'...type, len, vlan_pcp, zfill type_={'buf':'\x00\x02' 'val':ofproto.OFPAT_SET_VLAN_PCP}<line_sep>len_={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_VLAN_PCP_SIZE}<line_sep>vlan_pcp={'buf':'\x1c' 'val':28}<line_sep>zfill='\x00'<times>3<line_sep>buf=type_['buf']+len_['buf']+vlan_pcp['buf']+zfill<line_sep>c=OFPActionVlanPcp(vlan_pcp['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.vlan_pcp['val'] self.c.vlan_pcp)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.vlan_pcp['val'] res.vlan_pcp)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x01' 'val':1}<line_sep>buf=type_['buf']+self.len_['buf']+self.vlan_pcp['buf']+self.zfill<line_sep>self.c.parser(buf 0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x07' 'val':7}<line_sep>buf=self.type_['buf']+len_['buf']+self.vlan_pcp['buf']+self.zfill<line_sep>self.c.parser(buf 0)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_VLAN_PCP_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vlan_pcp['val'] res[2])<block_end><block_end><class_stmt>TestOFPActionStripVlan(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionStripVlan """<line_sep># OFP_ACTION_HEADER_PACK_STR # '!HH4x'...type, len, zfill type_={'buf':'\x00\x03' 'val':ofproto.OFPAT_STRIP_VLAN}<line_sep>len_={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_HEADER_SIZE}<line_sep>zfill='\x00'<times>4<line_sep>buf=type_['buf']+len_['buf']+zfill<line_sep>c=OFPActionStripVlan()<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>ok_(self.c.parser(self.buf 0))<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x01' 'val':1}<line_sep>buf=type_['buf']+self.len_['buf']+self.zfill<line_sep>self.c.parser(buf 0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x07' 'val':7}<line_sep>buf=self.type_['buf']+len_['buf']+self.zfill<line_sep>self.c.parser(buf 0)<block_end><block_end><class_stmt>TestOFPActionSetDlSrc(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionSetDlSrc """<line_sep># OFP_ACTION_DL_ADDR_PACK_STR # '!HH6s6x'...type, len, dl_addr, zfill type_={'buf':'\x00\x04' 'val':ofproto.OFPAT_SET_DL_SRC}<line_sep>len_={'buf':'\x00\x10' 
'val':ofproto.OFP_ACTION_DL_ADDR_SIZE}<line_sep>dl_addr='\x0e\xde\x27\xce\xc6\xcf'<line_sep>zfill='\x00'<times>6<line_sep>buf=type_['buf']+len_['buf']+dl_addr+zfill<line_sep>c=OFPActionSetDlSrc(dl_addr)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.dl_addr self.c.dl_addr)<block_end><def_stmt>test_parser_type_src self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.dl_addr res.dl_addr)<block_end><def_stmt>test_parser_type_dst self<block_start>type_={'buf':'\x00\x05' 'val':ofproto.OFPAT_SET_DL_DST}<line_sep>buf=type_['buf']+self.len_['buf']+self.dl_addr+self.zfill<line_sep>res=self.c.parser(buf 0)<line_sep>eq_(self.dl_addr res.dl_addr)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x06' 'val':6}<line_sep>buf=type_['buf']+self.len_['buf']+self.dl_addr+self.zfill<line_sep>res=self.c.parser(buf 0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x07' 'val':7}<line_sep>buf=self.type_['buf']+len_['buf']+self.dl_addr+self.zfill<line_sep>res=self.c.parser(buf 0)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_DL_ADDR_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.dl_addr res[2])<block_end><block_end><class_stmt>TestOFPActionSetDlDst(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionSetDlDst """<line_sep># OFP_ACTION_DL_ADDR_PACK_STR # '!HH6s6x'...type, len, dl_addr, zfill type_={'buf':'\x00\x05' 'val':ofproto.OFPAT_SET_DL_DST}<line_sep>len_={'buf':'\x00\x10' 'val':ofproto.OFP_ACTION_DL_ADDR_SIZE}<line_sep>dl_addr='\x37\x48\x38\x9a\xf4\x28'<line_sep>zfill='\x00'<times>6<line_sep>buf=type_['buf']+len_['buf']+dl_addr+zfill<line_sep>c=OFPActionSetDlDst(dl_addr)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.dl_addr self.c.dl_addr)<block_end><def_stmt>test_parser_type_dst self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.dl_addr res.dl_addr)<block_end><def_stmt>test_parser_type_src self<block_start>type_={'buf':'\x00\x04' 'val':ofproto.OFPAT_SET_DL_SRC}<line_sep>buf=type_['buf']+self.len_['buf']+self.dl_addr+self.zfill<line_sep>res=self.c.parser(buf 0)<line_sep>eq_(self.dl_addr res.dl_addr)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x06' 'val':6}<line_sep>buf=type_['buf']+self.len_['buf']+self.dl_addr+self.zfill<line_sep>res=self.c.parser(buf 0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x07' 'val':7}<line_sep>buf=self.type_['buf']+len_['buf']+self.dl_addr+self.zfill<line_sep>res=self.c.parser(buf 0)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_DL_ADDR_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.dl_addr res[2])<block_end><block_end><class_stmt>TestOFPActionSetNwSrc(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionSetNwSrc """<line_sep># OFP_ACTION_NW_ADDR_PACK_STR # '!HHI'...type, len, nw_addr type_={'buf':'\x00\x06' 
'val':ofproto.OFPAT_SET_NW_SRC}<line_sep>len_={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_NW_ADDR_SIZE}<line_sep>nw_addr={'buf':'\xc0\xa8\x7a\x0a' 'val':3232266762}<line_sep>buf=type_['buf']+len_['buf']+nw_addr['buf']<line_sep>c=OFPActionSetNwSrc(nw_addr['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.nw_addr['val'] self.c.nw_addr)<block_end><def_stmt>test_parser_src self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.nw_addr['val'] res.nw_addr)<block_end><def_stmt>test_parser_dst self<block_start>type_={'buf':'\x00\x07' 'val':ofproto.OFPAT_SET_NW_DST}<line_sep>buf=type_['buf']+self.len_['buf']+self.nw_addr['buf']<line_sep>res=self.c.parser(buf 0)<line_sep>eq_(self.nw_addr['val'] res.nw_addr)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x05' 'val':5}<line_sep>buf=type_['buf']+self.len_['buf']+self.nw_addr['buf']<line_sep>self.c.parser(buf 0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x10' 'val':16}<line_sep>buf=self.type_['buf']+len_['buf']+self.nw_addr['buf']<line_sep>self.c.parser(buf 0)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_NW_ADDR_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.nw_addr['val'] res[2])<block_end><block_end><class_stmt>TestOFPActionSetNwDst(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionSetNwDst """<line_sep># OFP_ACTION_NW_ADDR_PACK_STR # '!HHI'...type, len, nw_addr type_={'buf':'\x00\x07' 'val':ofproto.OFPAT_SET_NW_DST}<line_sep>len_={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_NW_ADDR_SIZE}<line_sep>nw_addr={'buf':'\xc0\xa8\x7a\x0a' 'val':3232266762}<line_sep>buf=type_['buf']+len_['buf']+nw_addr['buf']<line_sep>c=OFPActionSetNwDst(nw_addr['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.nw_addr['val'] self.c.nw_addr)<block_end><def_stmt>test_parser_dst self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.nw_addr['val'] res.nw_addr)<block_end><def_stmt>test_parser_src self<block_start>type_={'buf':'\x00\x06' 'val':ofproto.OFPAT_SET_NW_SRC}<line_sep>buf=type_['buf']+self.len_['buf']+self.nw_addr['buf']<line_sep>res=self.c.parser(buf 0)<line_sep>eq_(self.nw_addr['val'] res.nw_addr)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x05' 'val':5}<line_sep>buf=type_['buf']+self.len_['buf']+self.nw_addr['buf']<line_sep>self.c.parser(buf 0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x10' 'val':16}<line_sep>buf=self.type_['buf']+len_['buf']+self.nw_addr['buf']<line_sep>self.c.parser(buf 0)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_NW_ADDR_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.nw_addr['val'] res[2])<block_end><block_end><class_stmt>TestOFPActionSetNwTos(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionSetNwTos """<line_sep># OFP_ACTION_NW_TOS_PACK_STR # '!HHB3x'...type, len, tos, 
zfill type_={'buf':'\x00\x08' 'val':ofproto.OFPAT_SET_NW_TOS}<line_sep>len_={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_NW_TOS_SIZE}<line_sep>tos={'buf':'\xb6' 'val':182}<line_sep>zfill='\x00'<times>3<line_sep>buf=type_['buf']+len_['buf']+tos['buf']+zfill<line_sep>c=OFPActionSetNwTos(tos['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.tos['val'] self.c.tos)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.tos['val'] res.tos)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x05' 'val':5}<line_sep>buf=type_['buf']+self.len_['buf']+self.tos['buf']+self.zfill<line_sep>self.c.parser(buf 0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x07' 'val':7}<line_sep>buf=self.type_['buf']+len_['buf']+self.tos['buf']+self.zfill<line_sep>self.c.parser(buf 0)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_NW_TOS_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.tos['val'] res[2])<block_end><block_end><class_stmt>TestOFPActionSetTpSrc(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionSetTpSrc """<line_sep># OFP_ACTION_TP_PORT_PACK_STR # '!HHH2x'...type, len, tp, zfill type_={'buf':'\x00\x09' 'val':ofproto.OFPAT_SET_TP_SRC}<line_sep>len_={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_TP_PORT_SIZE}<line_sep>tp={'buf':'\x07\xf1' 'val':2033}<line_sep>zfill='\x00'<times>2<line_sep>buf=type_['buf']+len_['buf']+tp['buf']+zfill<line_sep>c=OFPActionSetTpSrc(tp['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.tp['val'] self.c.tp)<block_end><def_stmt>test_parser_src self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.tp['val'] res.tp)<block_end><def_stmt>test_parser_dst self<block_start>type_={'buf':'\x00\x0a' 'val':ofproto.OFPAT_SET_TP_DST}<line_sep>buf=type_['buf']+self.len_['buf']+self.tp['buf']+self.zfill<line_sep>res=self.c.parser(buf 0)<line_sep>eq_(self.tp['val'] res.tp)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x07' 'val':7}<line_sep>buf=type_['buf']+self.len_['buf']+self.tp['buf']+self.zfill<line_sep>self.c.parser(buf 0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x07' 'val':7}<line_sep>buf=self.type_['buf']+len_['buf']+self.tp['buf']+self.zfill<line_sep>self.c.parser(buf 0)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_TP_PORT_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.tp['val'] res[2])<block_end><block_end><class_stmt>TestOFPActionSetTpDst(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionSetTpDst """<line_sep># OFP_ACTION_TP_PORT_PACK_STR # '!HHH2x'...type, len, tp, zfill type_={'buf':'\x00\x0a' 'val':ofproto.OFPAT_SET_TP_DST}<line_sep>len_={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_TP_PORT_SIZE}<line_sep>tp={'buf':'\x06\x6d'
'val':1645}<line_sep>zfill='\x00'<times>2<line_sep>buf=type_['buf']+len_['buf']+tp['buf']+zfill<line_sep>c=OFPActionSetTpDst(tp['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.tp['val'] self.c.tp)<block_end><def_stmt>test_parser_dst self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.tp['val'] res.tp)<block_end><def_stmt>test_parser_src self<block_start>type_={'buf':'\x00\x09' 'val':ofproto.OFPAT_SET_TP_SRC}<line_sep>buf=type_['buf']+self.len_['buf']+self.tp['buf']+self.zfill<line_sep>res=self.c.parser(buf 0)<line_sep>eq_(self.tp['val'] res.tp)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x10' 'val':16}<line_sep>buf=type_['buf']+self.len_['buf']+self.tp['buf']+self.zfill<line_sep>self.c.parser(buf 0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x07' 'val':7}<line_sep>buf=self.type_['buf']+len_['buf']+self.tp['buf']+self.zfill<line_sep>self.c.parser(buf 0)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_TP_PORT_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.tp['val'] res[2])<block_end><block_end><class_stmt>TestOFPActionEnqueue(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPActionEnqueue """<line_sep># OFP_ACTION_ENQUEUE_PACK_STR # '!HHH6xI'...type_, len_, port, zfill, queue_id type_={'buf':'\x00\x0b' 'val':ofproto.OFPAT_ENQUEUE}<line_sep>len_={'buf':'\x00\x10' 'val':ofproto.OFP_ACTION_ENQUEUE_SIZE}<line_sep>port={'buf':'\x04\x55' 'val':1109}<line_sep>zfill='\x00'<times>6<line_sep>queue_id={'buf':'\x0a\x5b\x03\x5e' 'val':173736798}<line_sep>buf=type_['buf']+len_['buf']+port['buf']+zfill+queue_id['buf']<line_sep>c=OFPActionEnqueue(port['val'] queue_id['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.port['val'] self.c.port)<line_sep>eq_(self.queue_id['val'] self.c.queue_id)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.port['val'] res.port)<line_sep>eq_(self.queue_id['val'] res.queue_id)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_type self<block_start>type_={'buf':'\x00\x0a' 'val':10}<line_sep>buf=type_['buf']+self.len_['buf']+self.port['buf']+self.zfill+self.queue_id['buf']<line_sep>self.c.parser(buf 0)<block_end>@raises(AssertionError)<def_stmt>test_parser_check_len self<block_start>len_={'buf':'\x00\x05' 'val':5}<line_sep>buf=self.type_['buf']+len_['buf']+self.port['buf']+self.zfill+self.queue_id['buf']<line_sep>self.c.parser(buf 0)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.OFP_ACTION_ENQUEUE_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.port['val'] res[2])<line_sep>eq_(self.queue_id['val'] res[3])<block_end><block_end><class_stmt>TestNXActionResubmit(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionResubmit """<line_sep># NX_ACTION_RESUBMIT_PACK_STR # '!HHIHHB3x'...type, len, vendor, subtype, in_port, table, zfill type_={'buf':'\xff\xff' 
'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x10' 'val':ofproto.NX_ACTION_RESUBMIT_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':8992}<line_sep>subtype={'buf':'\x00\x01' 'val':1}<line_sep>in_port={'buf':'\x0a\x4c' 'val':2636}<line_sep>table={'buf':'\x52' 'val':82}<line_sep>zfill='\x00'<times>3<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+in_port['buf']+table['buf']+zfill<line_sep>c=NXActionResubmit(in_port['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self.len_['val'] self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<line_sep>eq_(self.in_port['val'] self.c.in_port)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.in_port['val'] res.in_port)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_RESUBMIT_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.in_port['val'] res[4])<block_end><block_end><class_stmt>TestNXActionResubmitTable(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionResubmitTable """<line_sep># NX_ACTION_RESUBMIT_PACK_STR # '!HHIHHB3x'...type, len, vendor, subtype, in_port, table, zfill type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x10' 'val':ofproto.NX_ACTION_RESUBMIT_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':8992}<line_sep>subtype={'buf':'\x00\x0e' 'val':14}<line_sep>in_port={'buf':'\x0a\x4c' 'val':2636}<line_sep>table={'buf':'\x52' 'val':82}<line_sep>zfill='\x00'<times>3<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+in_port['buf']+table['buf']+zfill<line_sep>c=NXActionResubmitTable(in_port['val'] table['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self.len_['val'] self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<line_sep>eq_(self.in_port['val'] self.c.in_port)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.in_port['val'] res.in_port)<line_sep>eq_(self.table['val'] res.table)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_RESUBMIT_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.in_port['val'] res[4])<block_end><block_end><class_stmt>TestNXActionSetTunnel(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionSetTunnel """<line_sep># NX_ACTION_SET_TUNNEL_PACK_STR # '!HHIH2xI'...type, len, vendor, subtype, zfill, tun_id type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x10' 'val':ofproto.NX_ACTION_SET_TUNNEL_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':8992}<line_sep>subtype={'buf':'\x00\x02' 
'val':2}<line_sep>zfill='\x00'<times>2<line_sep>tun_id={'buf':'\x01\x6f\x01\xd0' 'val':24052176}<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+zfill+tun_id['buf']<line_sep>c=NXActionSetTunnel(tun_id['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self.len_['val'] self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<line_sep>eq_(self.tun_id['val'] self.c.tun_id)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.tun_id['val'] res.tun_id)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_SET_TUNNEL_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.tun_id['val'] res[4])<block_end><block_end><class_stmt>TestNXActionSetQueue(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionSetQueue """<line_sep># NX_ACTION_SET_QUEUE_PACK_STR # '!HHIH2xI'...type, len, vendor, subtype, zfill, queue_id type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x10' 'val':ofproto.NX_ACTION_SET_TUNNEL_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':ofproto.NX_VENDOR_ID}<line_sep>subtype={'buf':'\x00\x04' 'val':ofproto.NXAST_SET_QUEUE}<line_sep>zfill='\x00'<times>2<line_sep>queue_id={'buf':'\xde\xbe\xc5\x18' 'val':3737044248}<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+zfill+queue_id['buf']<line_sep>c=NXActionSetQueue(queue_id['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self.len_['val'] self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<line_sep>eq_(self.queue_id['val'] self.c.queue_id)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.queue_id['val'] res.queue_id)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_SET_QUEUE_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.queue_id['val'] res[4])<block_end><block_end><class_stmt>TestNXActionPopQueue(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionPopQueue """<line_sep># NX_ACTION_POP_QUEUE_PACK_STR # '!HHIH6x'...type, len, vendor, subtype, zfill type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x10' 'val':ofproto.NX_ACTION_SET_TUNNEL_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':ofproto.NX_VENDOR_ID}<line_sep>subtype={'buf':'\x00\x05' 'val':ofproto.NXAST_POP_QUEUE}<line_sep>zfill='\x00'<times>6<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+zfill<line_sep>c=NXActionPopQueue()<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self.len_['val'] 
self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.type_['val'] res.type)<line_sep>eq_(self.len_['val'] res.len)<line_sep>eq_(self.vendor['val'] res.vendor)<line_sep>eq_(self.subtype['val'] res.subtype)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_POP_QUEUE_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<block_end><block_end><class_stmt>TestNXActionRegMove(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionRegMove """<line_sep># NX_ACTION_REG_MOVE_PACK_STR # '!HHIHHHHII'...type_, len_, vendor, subtype, n_bits, # src_ofs, dst_ofs, src, dst type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x18' 'val':ofproto.NX_ACTION_REG_MOVE_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':ofproto.NX_VENDOR_ID}<line_sep>subtype={'buf':'\x00\x06' 'val':ofproto.NXAST_REG_MOVE}<line_sep>n_bits={'buf':'\x3d\x98' 'val':15768}<line_sep>src_ofs={'buf':'\xf3\xa3' 'val':62371}<line_sep>dst_ofs={'buf':'\xdc\x67' 'val':56423}<line_sep>src={'buf':'\x15\x68\x60\xfd' 'val':359162109}<line_sep>dst={'buf':'\x9f\x9f\x88\x26' 'val':2678032422}<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+n_bits['buf']+src_ofs['buf']+dst_ofs['buf']+src['buf']+dst['buf']<line_sep>c=NXActionRegMove(n_bits['val'] src_ofs['val'] dst_ofs['val'] src['val'] dst['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self.len_['val'] self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<line_sep>eq_(self.n_bits['val'] self.c.n_bits)<line_sep>eq_(self.src_ofs['val'] self.c.src_ofs)<line_sep>eq_(self.dst_ofs['val'] self.c.dst_ofs)<line_sep>eq_(self.src['val'] self.c.src)<line_sep>eq_(self.dst['val'] self.c.dst)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.n_bits['val'] res.n_bits)<line_sep>eq_(self.src_ofs['val'] res.src_ofs)<line_sep>eq_(self.dst_ofs['val'] res.dst_ofs)<line_sep>eq_(self.src['val'] res.src)<line_sep>eq_(self.dst['val'] res.dst)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_REG_MOVE_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.n_bits['val'] res[4])<line_sep>eq_(self.src_ofs['val'] res[5])<line_sep>eq_(self.dst_ofs['val'] res[6])<line_sep>eq_(self.src['val'] res[7])<line_sep>eq_(self.dst['val'] res[8])<block_end><block_end><class_stmt>TestNXActionRegLoad(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionRegLoad """<line_sep># NX_ACTION_REG_LOAD_PACK_STR # '!HHIHHIQ'...type_, len_, vendor, subtype, # ofs_nbits, dst, value type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x18' 'val':ofproto.NX_ACTION_REG_MOVE_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 
'val':ofproto.NX_VENDOR_ID}<line_sep>subtype={'buf':'\x00\x07' 'val':ofproto.NXAST_REG_LOAD}<line_sep>ofs_nbits={'buf':'\x3d\x98' 'val':15768}<line_sep>dst={'buf':'\x9f\x9f\x88\x26' 'val':2678032422}<line_sep>value={'buf':'\x33\x51\xcd\x43\x25\x28\x18\x99' 'val':3697962457317775513}<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+ofs_nbits['buf']+dst['buf']+value['buf']<line_sep>c=NXActionRegLoad(ofs_nbits['val'] dst['val'] value['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self.len_['val'] self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<line_sep>eq_(self.ofs_nbits['val'] self.c.ofs_nbits)<line_sep>eq_(self.dst['val'] self.c.dst)<line_sep>eq_(self.value['val'] self.c.value)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.ofs_nbits['val'] res.ofs_nbits)<line_sep>eq_(self.dst['val'] res.dst)<line_sep>eq_(self.value['val'] res.value)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_REG_LOAD_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.ofs_nbits['val'] res[4])<line_sep>eq_(self.dst['val'] res[5])<line_sep>eq_(self.value['val'] res[6])<block_end><block_end><class_stmt>TestNXActionSetTunnel64(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionSetTunnel64 """<line_sep># NX_ACTION_SET_TUNNEL64_PACK_STR # '!HHIH6xQ'...type, len, vendor, subtype, zfill, tun_id type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x18' 'val':ofproto.NX_ACTION_SET_TUNNEL64_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':ofproto.NX_VENDOR_ID}<line_sep>subtype={'buf':'\x00\x09' 'val':ofproto.NXAST_SET_TUNNEL64}<line_sep>zfill='\x00'<times>6<line_sep>tun_id={'buf':'\x6e\x01\xa6\xea\x7e\x36\x1d\xd9' 'val':7926800345218817497}<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+zfill+tun_id['buf']<line_sep>c=NXActionSetTunnel64(tun_id['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self.len_['val'] self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<line_sep>eq_(self.tun_id['val'] self.c.tun_id)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.tun_id['val'] res.tun_id)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_SET_TUNNEL64_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.tun_id['val'] res[4])<block_end><block_end><class_stmt>TestNXActionMultipath(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionMultipath """<line_sep># NX_ACTION_MULTIPATH_PACK_STR # '!HHIHHH2xHHI2xHI'...type, len, vendor, subtype, fields, basis, zfill # algorithm, max_link, arg, zfill, ofs_nbits, dst
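# Illustrative sketch (hypothetical, not part of the original fixtures): the hand-built 'buf' below is
# equivalent to packing the whole 32-byte action in one call, e.g.
#   struct.pack('!HHIHHH2xHHI2xHI', 0xffff, 32, 0x00002320, 0x000a,
#               28149, 31754, 33309, 1579, 410599880, 43418, 3106870884)
# the first four values are the protocol-fixed header (type, len, vendor, subtype); the rest are arbitrary test data.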
type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x20' 'val':ofproto.NX_ACTION_MULTIPATH_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':ofproto.NX_VENDOR_ID}<line_sep>subtype={'buf':'\x00\x0a' 'val':ofproto.NXAST_MULTIPATH}<line_sep>fields={'buf':'\x6d\xf5' 'val':28149}<line_sep>basis={'buf':'\x7c\x0a' 'val':31754}<line_sep>zfill0='\x00'<times>2<line_sep>algorithm={'buf':'\x82\x1d' 'val':33309}<line_sep>max_link={'buf':'\x06\x2b' 'val':1579}<line_sep>arg={'buf':'\x18\x79\x41\xc8' 'val':410599880}<line_sep>zfill1='\x00'<times>2<line_sep>ofs_nbits={'buf':'\xa9\x9a' 'val':43418}<line_sep>dst={'buf':'\xb9\x2f\x16\x64' 'val':3106870884}<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+fields['buf']+basis['buf']+zfill0+algorithm['buf']+max_link['buf']+arg['buf']+zfill1+ofs_nbits['buf']+dst['buf']<line_sep>c=NXActionMultipath(fields['val'] basis['val'] algorithm['val'] max_link['val'] arg['val'] ofs_nbits['val'] dst['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.fields['val'] self.c.fields)<line_sep>eq_(self.basis['val'] self.c.basis)<line_sep>eq_(self.algorithm['val'] self.c.algorithm)<line_sep>eq_(self.max_link['val'] self.c.max_link)<line_sep>eq_(self.arg['val'] self.c.arg)<line_sep>eq_(self.ofs_nbits['val'] self.c.ofs_nbits)<line_sep>eq_(self.dst['val'] self.c.dst)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.fields['val'] res.fields)<line_sep>eq_(self.basis['val'] res.basis)<line_sep>eq_(self.algorithm['val'] res.algorithm)<line_sep>eq_(self.max_link['val'] res.max_link)<line_sep>eq_(self.arg['val'] res.arg)<line_sep>eq_(self.ofs_nbits['val'] res.ofs_nbits)<line_sep>eq_(self.dst['val'] res.dst)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_MULTIPATH_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.fields['val'] res[4])<line_sep>eq_(self.basis['val'] res[5])<line_sep>eq_(self.algorithm['val'] res[6])<line_sep>eq_(self.max_link['val'] res[7])<line_sep>eq_(self.arg['val'] res[8])<line_sep>eq_(self.ofs_nbits['val'] res[9])<line_sep>eq_(self.dst['val'] res[10])<block_end><block_end><class_stmt>TestNXActionBundle(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionBundle """<line_sep># NX_ACTION_BUNDLE_PACK_STR # '!HHIHHHHIHHI4x'...type, len, vendor, subtype, algorithm, # fields, basis, slave_type, n_slaves, # ofs_nbits, dst, zfill type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x20' 'val':ofproto.NX_ACTION_BUNDLE_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':ofproto.NX_VENDOR_ID}<line_sep>subtype={'buf':'\x00\x0c' 'val':ofproto.NXAST_BUNDLE}<line_sep>algorithm={'buf':'\x51\xa7' 'val':20903}<line_sep>fields={'buf':'\xf8\xef' 'val':63727}<line_sep>basis={'buf':'\xfd\x6f' 'val':64879}<line_sep>slave_type={'buf':'\x7c\x51\x0f\xe0' 'val':2085687264}<line_sep>n_slaves={'buf':'\x00\x02' 'val':2}<line_sep>ofs_nbits={'buf':'\xec\xf7' 'val':60663}<line_sep>dst={'buf':'\x50\x7c\x75\xfe' 'val':1350333950}<line_sep>zfill='\x00'<times>4<line_sep>slaves_buf=('\x00\x01' '\x00\x02')<line_sep>slaves_val=(1 
2)<line_sep>_len=len_['val']+len(slaves_val)<times>2<line_sep>_len<augadd>(_len%8)<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+algorithm['buf']+fields['buf']+basis['buf']+slave_type['buf']+n_slaves['buf']+ofs_nbits['buf']+dst['buf']+zfill+slaves_buf[0]+slaves_buf[1]<line_sep>c=NXActionBundle(algorithm['val'] fields['val'] basis['val'] slave_type['val'] n_slaves['val'] ofs_nbits['val'] dst['val'] slaves_val)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self._len self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<line_sep>eq_(self.algorithm['val'] self.c.algorithm)<line_sep>eq_(self.fields['val'] self.c.fields)<line_sep>eq_(self.basis['val'] self.c.basis)<line_sep>eq_(self.slave_type['val'] self.c.slave_type)<line_sep>eq_(self.n_slaves['val'] self.c.n_slaves)<line_sep>eq_(self.ofs_nbits['val'] self.c.ofs_nbits)<line_sep>eq_(self.dst['val'] self.c.dst)<line_sep># slaves slaves=self.c.slaves<line_sep>eq_(self.slaves_val[0] slaves[0])<line_sep>eq_(self.slaves_val[1] slaves[1])<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.type_['val'] res.type)<line_sep>eq_(self._len res.len)<line_sep>eq_(self.vendor['val'] res.vendor)<line_sep>eq_(self.subtype['val'] res.subtype)<line_sep>eq_(self.algorithm['val'] res.algorithm)<line_sep>eq_(self.fields['val'] res.fields)<line_sep>eq_(self.basis['val'] res.basis)<line_sep>eq_(self.slave_type['val'] res.slave_type)<line_sep>eq_(self.n_slaves['val'] res.n_slaves)<line_sep>eq_(self.ofs_nbits['val'] res.ofs_nbits)<line_sep>eq_(self.dst['val'] res.dst)<line_sep># slaves slaves=res.slaves<line_sep>eq_(self.slaves_val[0] slaves[0])<line_sep>eq_(self.slaves_val[1] slaves[1])<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt='!'+ofproto.NX_ACTION_BUNDLE_PACK_STR.replace('!' 
'')+'HH4x'<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self._len res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.algorithm['val'] res[4])<line_sep>eq_(self.fields['val'] res[5])<line_sep>eq_(self.basis['val'] res[6])<line_sep>eq_(self.slave_type['val'] res[7])<line_sep>eq_(self.n_slaves['val'] res[8])<line_sep>eq_(self.ofs_nbits['val'] res[9])<line_sep>eq_(self.dst['val'] res[10])<block_end><block_end><class_stmt>TestNXActionBundleLoad(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionBundleLoad """<line_sep># NX_ACTION_BUNDLE_PACK_STR # '!HHIHHHHIHHI4x'...type, len, vendor, subtype, algorithm, # fields, basis, slave_type, n_slaves, # ofs_nbits, dst, zfill type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x20' 'val':ofproto.NX_ACTION_BUNDLE_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':ofproto.NX_VENDOR_ID}<line_sep>subtype={'buf':'\x00\x0d' 'val':ofproto.NXAST_BUNDLE_LOAD}<line_sep>algorithm={'buf':'\x83\x15' 'val':33557}<line_sep>fields={'buf':'\xc2\x7a' 'val':49786}<line_sep>basis={'buf':'\x86\x18' 'val':34328}<line_sep>slave_type={'buf':'\x18\x42\x0b\x55' 'val':406981461}<line_sep>n_slaves={'buf':'\x00\x02' 'val':2}<line_sep>ofs_nbits={'buf':'\xd2\x9d' 'val':53917}<line_sep>dst={'buf':'\x37\xfe\xb3\x60' 'val':939438944}<line_sep>zfill='\x00'<times>4<line_sep>slaves_buf=('\x00\x01' '\x00\x02')<line_sep>slaves_val=(1 2)<line_sep>_len=len_['val']+len(slaves_val)<times>2<line_sep>_len<augadd>(_len%8)<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+algorithm['buf']+fields['buf']+basis['buf']+slave_type['buf']+n_slaves['buf']+ofs_nbits['buf']+dst['buf']+zfill+slaves_buf[0]+slaves_buf[1]<line_sep>c=NXActionBundleLoad(algorithm['val'] fields['val'] basis['val'] slave_type['val'] n_slaves['val'] ofs_nbits['val'] dst['val'] slaves_val)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self._len self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<line_sep>eq_(self.algorithm['val'] self.c.algorithm)<line_sep>eq_(self.fields['val'] self.c.fields)<line_sep>eq_(self.basis['val'] self.c.basis)<line_sep>eq_(self.slave_type['val'] self.c.slave_type)<line_sep>eq_(self.n_slaves['val'] self.c.n_slaves)<line_sep>eq_(self.ofs_nbits['val'] self.c.ofs_nbits)<line_sep>eq_(self.dst['val'] self.c.dst)<line_sep># slaves slaves=self.c.slaves<line_sep>eq_(self.slaves_val[0] slaves[0])<line_sep>eq_(self.slaves_val[1] slaves[1])<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.type_['val'] res.type)<line_sep>eq_(self._len res.len)<line_sep>eq_(self.vendor['val'] res.vendor)<line_sep>eq_(self.subtype['val'] res.subtype)<line_sep>eq_(self.algorithm['val'] res.algorithm)<line_sep>eq_(self.fields['val'] res.fields)<line_sep>eq_(self.basis['val'] res.basis)<line_sep>eq_(self.slave_type['val'] res.slave_type)<line_sep>eq_(self.n_slaves['val'] res.n_slaves)<line_sep>eq_(self.ofs_nbits['val'] res.ofs_nbits)<line_sep>eq_(self.dst['val'] res.dst)<line_sep># slaves slaves=res.slaves<line_sep>eq_(self.slaves_val[0] slaves[0])<line_sep>eq_(self.slaves_val[1] slaves[1])<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 
0)<line_sep>fmt='!'+ofproto.NX_ACTION_BUNDLE_PACK_STR.replace('!' '')+'HH4x'<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self._len res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.algorithm['val'] res[4])<line_sep>eq_(self.fields['val'] res[5])<line_sep>eq_(self.basis['val'] res[6])<line_sep>eq_(self.slave_type['val'] res[7])<line_sep>eq_(self.n_slaves['val'] res[8])<line_sep>eq_(self.ofs_nbits['val'] res[9])<line_sep>eq_(self.dst['val'] res[10])<block_end><block_end><class_stmt>TestNXActionAutopath(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionAutopath """<line_sep># NX_ACTION_AUTOPATH_PACK_STR # '!HHIHHII4x'...type, len, vendor, subtype, ofs_nbits, # dst, id_, zfill type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x20' 'val':ofproto.NX_ACTION_OUTPUT_REG_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':ofproto.NX_VENDOR_ID}<line_sep>subtype={'buf':'\x00\x0b' 'val':ofproto.NXAST_AUTOPATH}<line_sep>ofs_nbits={'buf':'\xfe\x78' 'val':65144}<line_sep>dst={'buf':'\xf8\x55\x74\x95' 'val':4166349973}<line_sep>id_={'buf':'\x02\x2d\x37\xed' 'val':36517869}<line_sep>zfill='\x00'<times>4<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+ofs_nbits['buf']+dst['buf']+id_['buf']+zfill<line_sep>c=NXActionAutopath(ofs_nbits['val'] dst['val'] id_['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self.len_['val'] self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<line_sep>eq_(self.ofs_nbits['val'] self.c.ofs_nbits)<line_sep>eq_(self.dst['val'] self.c.dst)<line_sep>eq_(self.id_['val'] self.c.id)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.type_['val'] res.type)<line_sep>eq_(self.len_['val'] res.len)<line_sep>eq_(self.vendor['val'] res.vendor)<line_sep>eq_(self.subtype['val'] res.subtype)<line_sep>eq_(self.ofs_nbits['val'] res.ofs_nbits)<line_sep>eq_(self.dst['val'] res.dst)<line_sep>eq_(self.id_['val'] res.id)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_AUTOPATH_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.ofs_nbits['val'] res[4])<line_sep>eq_(self.dst['val'] res[5])<line_sep>eq_(self.id_['val'] res[6])<block_end><block_end><class_stmt>TestNXActionOutputReg(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionOutputReg """<line_sep># NX_ACTION_OUTPUT_REG_PACK_STR # '!HHIHHIH6x'...type, len, vendor, subtype, ofs_nbits, # src, max_len, zfill type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x20' 'val':ofproto.NX_ACTION_OUTPUT_REG_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':ofproto.NX_VENDOR_ID}<line_sep>subtype={'buf':'\x00\x0f' 'val':ofproto.NXAST_OUTPUT_REG}<line_sep>ofs_nbits={'buf':'\xfe\x78' 'val':65144}<line_sep>src={'buf':'\x5e\x3a\x04\x26' 'val':1580860454}<line_sep>max_len={'buf':'\x00\x08' 
'val':ofproto.OFP_ACTION_OUTPUT_SIZE}<line_sep>zfill='\x00'<times>6<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+ofs_nbits['buf']+src['buf']+max_len['buf']+zfill<line_sep>c=NXActionOutputReg(ofs_nbits['val'] src['val'] max_len['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self.len_['val'] self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<line_sep>eq_(self.ofs_nbits['val'] self.c.ofs_nbits)<line_sep>eq_(self.src['val'] self.c.src)<line_sep>eq_(self.max_len['val'] self.c.max_len)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.type_['val'] res.type)<line_sep>eq_(self.len_['val'] res.len)<line_sep>eq_(self.vendor['val'] res.vendor)<line_sep>eq_(self.subtype['val'] res.subtype)<line_sep>eq_(self.ofs_nbits['val'] res.ofs_nbits)<line_sep>eq_(self.src['val'] res.src)<line_sep>eq_(self.max_len['val'] res.max_len)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_OUTPUT_REG_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<line_sep>eq_(self.ofs_nbits['val'] res[4])<line_sep>eq_(self.src['val'] res[5])<line_sep>eq_(self.max_len['val'] res[6])<block_end><block_end><class_stmt>TestNXActionExit(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXActionExit """<line_sep># NX_ACTION_HEADER_PACK_STR # '!HHIH'...type, len, vendor, subtype type_={'buf':'\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>len_={'buf':'\x00\x10' 'val':ofproto.NX_ACTION_HEADER_SIZE}<line_sep>vendor={'buf':'\x00\x00\x23\x20' 'val':ofproto.NX_VENDOR_ID}<line_sep>subtype={'buf':'\x00\x11' 'val':ofproto.NXAST_EXIT}<line_sep>zfill='\x00'<times>6<line_sep>buf=type_['buf']+len_['buf']+vendor['buf']+subtype['buf']+zfill<line_sep>c=NXActionExit()<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.type_['val'] self.c.type)<line_sep>eq_(self.len_['val'] self.c.len)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.subtype['val'] self.c.subtype)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.type_['val'] res.type)<line_sep>eq_(self.len_['val'] res.len)<line_sep>eq_(self.vendor['val'] res.vendor)<line_sep>eq_(self.subtype['val'] res.subtype)<block_end><def_stmt>test_serialize self<block_start>buf=bytearray()<line_sep>self.c.serialize(buf 0)<line_sep>fmt=ofproto.NX_ACTION_HEADER_PACK_STR<line_sep>res=struct.unpack(fmt buffer(buf))<line_sep>eq_(self.type_['val'] res[0])<line_sep>eq_(self.len_['val'] res[1])<line_sep>eq_(self.vendor['val'] res[2])<line_sep>eq_(self.subtype['val'] res[3])<block_end><block_end><class_stmt>TestOFPDescStats(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPDescStats """<line_sep># OFP_DESC_STATS_PACK_STR # '!256s256s256s32s256s'...mfr_desc, hw_desc, sw_desc, serial_num, dp_desc 
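# Illustrative sketch (hypothetical, not part of the original fixtures): the format is five fixed-width
# byte strings, so the stats body could also be built as
#   struct.pack('!256s256s256s32s256s', 'mfr_desc', 'hw_desc', 'sw_desc', 'serial_num', 'dp_desc')
# note that struct pads 's' fields with NUL bytes while the .ljust() fixtures below pad with spaces,
# so only the padding byte differs; either way the body is 4 * 256 + 32 = 1056 bytes.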
mfr_desc='mfr_desc'.ljust(256)<line_sep>hw_desc='hw_desc'.ljust(256)<line_sep>sw_desc='sw_desc'.ljust(256)<line_sep>serial_num='serial_num'.ljust(32)<line_sep>dp_desc='dp_desc'.ljust(256)<line_sep>buf=mfr_desc+hw_desc+sw_desc+serial_num+dp_desc<line_sep>c=OFPDescStats(mfr_desc hw_desc sw_desc serial_num dp_desc)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.mfr_desc self.c.mfr_desc)<line_sep>eq_(self.hw_desc self.c.hw_desc)<line_sep>eq_(self.sw_desc self.c.sw_desc)<line_sep>eq_(self.serial_num self.c.serial_num)<line_sep>eq_(self.dp_desc self.c.dp_desc)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.mfr_desc res.mfr_desc)<line_sep>eq_(self.hw_desc res.hw_desc)<line_sep>eq_(self.sw_desc res.sw_desc)<line_sep>eq_(self.serial_num res.serial_num)<line_sep>eq_(self.dp_desc res.dp_desc)<block_end><block_end><class_stmt>TestOFPFlowStats(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPFlowStats """<line_sep># OFP_FLOW_STATS_0_PACK_STR # '!HBx'...length, table_id, zfill length={'buf':'\x00\x58' 'val':88}<line_sep>length_append_action={'buf':'\x00\x60' 'val':96}<line_sep>table_id={'buf':'\x51' 'val':81}<line_sep>zfill_0='\x00'<line_sep># OFP_MATCH_PACK_STR # '!IH6s6sHBxHBB2xIIHH'... match='\x97\x7c\xa6\x1e'+'\x5e\xa0'+'\x7a\x3e\xed\x30\x4a\x90'+'\x96\x8e\x67\xbe\x2f\xe2'+'\xb1\x81'+'\xbe'+'\x00'+'\x01\xab'+'\x42'+'\xfe'+'\x00\x00'+'\xa4\x5d\x5c\x42'+'\xa2\x5c\x2e\x05'+'\x5a\x94'+'\x64\xd4'<line_sep># OFP_FLOW_STATS_1_PACK_STR # '!IIHHH6xQQQ'...duration_sec, duration_nsec, priority, # idle_timeout, hard_timeout, zfill, # cookie, packet_count, byte_count duration_sec={'buf':'\x94\x19\xb3\xd2' 'val':2484712402}<line_sep>duration_nsec={'buf':'\xee\x66\xcf\x7c' 'val':3999715196}<line_sep>priority={'buf':'\xe1\xc0' 'val':57792}<line_sep>idle_timeout={'buf':'\x8e\x10' 'val':36368}<line_sep>hard_timeout={'buf':'\xd4\x99' 'val':54425}<line_sep>zfill_1='\x00\x00\x00\x00\x00\x00'<line_sep>cookie={'buf':'\x0b\x01\xe8\xe5\xf0\x84\x8a\xe0' 'val':793171083674290912}<line_sep>packet_count={'buf':'\x47\x5c\xc6\x05\x28\xff\x7c\xdb' 'val':5142202600015232219}<line_sep>byte_count={'buf':'\x24\xe9\x4b\xee\xcb\x57\xd9\xc3' 'val':2659740543924820419}<line_sep># <action>_PACK_STR...type_, len_ [others...]
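# Illustrative sketch (hypothetical, not part of the original fixtures): the appended action is a bare
# OFPAT_OUTPUT entry; assuming the usual OpenFlow 1.0 layout ofproto.OFP_ACTION_OUTPUT_PACK_STR == '!HHHH'
# (type, len, port, max_len), the four fragments below concatenate to
#   struct.pack('!HHHH', 0, 8, 22826, 8)
# i.e. the extra 8 bytes that _parser() appends when its 'action' flag is set.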
type={'buf':'\x00\x00' 'val':ofproto.OFPAT_OUTPUT}<line_sep>len={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_OUTPUT_SIZE}<line_sep>port={'buf':'\x59\x2a' 'val':22826}<line_sep>max_len={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_OUTPUT_SIZE}<line_sep>action=(type len port max_len)<line_sep>ACTION_TYPE=0<line_sep>ACTION_LEN=1<line_sep>ACTION_PORT=2<line_sep>ACTION_MAX_LEN=3<line_sep>c=OFPFlowStats()<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>_parser self action=<none><block_start>buf=self.table_id['buf']+self.zfill_0+self.match+self.duration_sec['buf']+self.duration_nsec['buf']+self.priority['buf']+self.idle_timeout['buf']+self.hard_timeout['buf']+self.zfill_1+self.cookie['buf']+self.packet_count['buf']+self.byte_count['buf']<if_stmt><not>action<block_start>buf=self.length['buf']+buf<block_end><else_stmt><block_start>buf=self.length_append_action['buf']+buf<for_stmt>a self.action<block_start>buf=buf+a['buf']<block_end><block_end><return>self.c.parser(buf 0)<block_end><def_stmt>test_parser self<block_start>res=self._parser()<line_sep>eq_(self.length['val'] res.length)<line_sep>eq_(self.table_id['val'] res.table_id)<line_sep>eq_(self.duration_sec['val'] res.duration_sec)<line_sep>eq_(self.duration_nsec['val'] res.duration_nsec)<line_sep>eq_(self.priority['val'] res.priority)<line_sep>eq_(self.idle_timeout['val'] res.idle_timeout)<line_sep>eq_(self.hard_timeout['val'] res.hard_timeout)<line_sep>eq_(self.cookie['val'] res.cookie)<line_sep>eq_(self.packet_count['val'] res.packet_count)<line_sep>eq_(self.byte_count['val'] res.byte_count)<block_end><def_stmt>test_parser_append_actions self<block_start>res=self._parser(<true>).actions[0]<line_sep>eq_(self.action[self.ACTION_TYPE]['val'] res.type)<line_sep>eq_(self.action[self.ACTION_LEN]['val'] res.len)<line_sep>eq_(self.action[self.ACTION_PORT]['val'] res.port)<line_sep>eq_(self.action[self.ACTION_MAX_LEN]['val'] res.max_len)<block_end><block_end><class_stmt>TestOFPAggregateStats(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPAggregateStats """<line_sep># OFP_AGGREGATE_STATS_REPLY_PACK_STR # '!QQI4x'...packet_count, byte_count, flow_count, zfill packet_count={'buf':'\x43\x95\x1b\xfb\x0f\xf6\xa7\xdd' 'val':4869829337189623773}<line_sep>byte_count={'buf':'\x36\xda\x2d\x80\x2a\x95\x35\xdd' 'val':3952521651464517085}<line_sep>flow_count={'buf':'\xc3\x0d\xc3\xed' 'val':3272459245}<line_sep>zfill='\x00'<times>4<line_sep>buf=packet_count['buf']+byte_count['buf']+flow_count['buf']+zfill<line_sep>c=OFPAggregateStats(packet_count['val'] byte_count['val'] flow_count['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.packet_count['val'] self.c.packet_count)<line_sep>eq_(self.byte_count['val'] self.c.byte_count)<line_sep>eq_(self.flow_count['val'] self.c.flow_count)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.packet_count['val'] res.packet_count)<line_sep>eq_(self.byte_count['val'] res.byte_count)<line_sep>eq_(self.flow_count['val'] res.flow_count)<block_end><block_end><class_stmt>TestOFPTableStats(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPTableStats """<line_sep># OFP_TABLE_STATS_PACK_STR # '!B3x32sIIIQQ'...table_id, zfill, name, wildcards, max_entries, # active_count, lookup_count, matched_count table_id={'buf':'\x5b' 
'val':91}<line_sep>zfill='\x00'<times>3<line_sep>name='name'.ljust(32)<line_sep>wildcards={'buf':'\xc5\xaf\x6e\x12' 'val':3316608530}<line_sep>max_entries={'buf':'\x95\x6c\x78\x4d' 'val':2506913869}<line_sep>active_count={'buf':'\x78\xac\xa8\x1e' 'val':2024581150}<line_sep>lookup_count={'buf':'\x40\x1d\x9c\x39\x19\xec\xd4\x1c' 'val':4620020561814017052}<line_sep>matched_count={'buf':'\x27\x35\x02\xb6\xc5\x5e\x17\x65' 'val':2825167325263435621}<line_sep>buf=table_id['buf']+zfill+name+wildcards['buf']+max_entries['buf']+active_count['buf']+lookup_count['buf']+matched_count['buf']<line_sep>c=OFPTableStats(table_id['val'] name wildcards['val'] max_entries['val'] active_count['val'] lookup_count['val'] matched_count['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.table_id['val'] self.c.table_id)<line_sep>eq_(self.name self.c.name)<line_sep>eq_(self.wildcards['val'] self.c.wildcards)<line_sep>eq_(self.max_entries['val'] self.c.max_entries)<line_sep>eq_(self.active_count['val'] self.c.active_count)<line_sep>eq_(self.lookup_count['val'] self.c.lookup_count)<line_sep>eq_(self.matched_count['val'] self.c.matched_count)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.table_id['val'] res.table_id)<line_sep>eq_(self.name res.name)<line_sep>eq_(self.wildcards['val'] res.wildcards)<line_sep>eq_(self.max_entries['val'] res.max_entries)<line_sep>eq_(self.active_count['val'] res.active_count)<line_sep>eq_(self.lookup_count['val'] res.lookup_count)<line_sep>eq_(self.matched_count['val'] res.matched_count)<block_end><block_end><class_stmt>TestOFPPortStats(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPPortStats """<line_sep># OFP_PORT_STATS_PACK_STR # '!H6xQQQQQQQQQQQQ'... 
port_no, zfill, rx_packets, tx_packets, # rx_bytes, tx_bytes, rx_dropped, tx_dropped, # rx_errors, tx_errors, rx_frame_err, # rx_over_err, rx_crc_err, collisions port_no={'buf':'\xe7\x6b' 'val':59243}<line_sep>zfill='\x00'<times>6<line_sep>rx_packets={'buf':'\x53\x44\x36\x61\xc4\x86\xc0\x37' 'val':5999980397101236279}<line_sep>tx_packets={'buf':'\x27\xa4\x41\xd7\xd4\x53\x9e\x42' 'val':2856480458895760962}<line_sep>rx_bytes={'buf':'\x55\xa1\x38\x60\x43\x97\x0d\x89' 'val':6170274950576278921}<line_sep>tx_bytes={'buf':'\x77\xe1\xd5\x63\x18\xae\x63\xaa' 'val':8638420181865882538}<line_sep>rx_dropped={'buf':'\x60\xe6\x20\x01\x24\xda\x4e\x5a' 'val':6982303461569875546}<line_sep>tx_dropped={'buf':'\x09\x2d\x5d\x71\x71\xb6\x8e\xc7' 'val':661287462113808071}<line_sep>rx_errors={'buf':'\x2f\x7e\x35\xb3\x66\x3c\x19\x0d' 'val':3422231811478788365}<line_sep>tx_errors={'buf':'\x57\x32\x08\x2f\x88\x32\x40\x6b' 'val':6283093430376743019}<line_sep>rx_frame_err={'buf':'\x0c\x28\x6f\xad\xce\x66\x6e\x8b' 'val':876072919806406283}<line_sep>rx_over_err={'buf':'\x5a\x90\x8f\x9b\xfc\x82\x2e\xa0' 'val':6525873760178941600}<line_sep>rx_crc_err={'buf':'\x73\x3a\x71\x17\xd6\x74\x69\x47' 'val':8303073210207070535}<line_sep>collisions={'buf':'\x2f\x52\x0c\x79\x96\x03\x6e\x79' 'val':3409801584220270201}<line_sep>buf=port_no['buf']+zfill+rx_packets['buf']+tx_packets['buf']+rx_bytes['buf']+tx_bytes['buf']+rx_dropped['buf']+tx_dropped['buf']+rx_errors['buf']+tx_errors['buf']+rx_frame_err['buf']+rx_over_err['buf']+rx_crc_err['buf']+collisions['buf']<line_sep>c=OFPPortStats(port_no['val'] rx_packets['val'] tx_packets['val'] rx_bytes['val'] tx_bytes['val'] rx_dropped['val'] tx_dropped['val'] rx_errors['val'] tx_errors['val'] rx_frame_err['val'] rx_over_err['val'] rx_crc_err['val'] collisions['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.port_no['val'] self.c.port_no)<line_sep>eq_(self.rx_packets['val'] self.c.rx_packets)<line_sep>eq_(self.tx_packets['val'] self.c.tx_packets)<line_sep>eq_(self.rx_bytes['val'] self.c.rx_bytes)<line_sep>eq_(self.tx_bytes['val'] self.c.tx_bytes)<line_sep>eq_(self.rx_dropped['val'] self.c.rx_dropped)<line_sep>eq_(self.tx_dropped['val'] self.c.tx_dropped)<line_sep>eq_(self.rx_errors['val'] self.c.rx_errors)<line_sep>eq_(self.tx_errors['val'] self.c.tx_errors)<line_sep>eq_(self.rx_frame_err['val'] self.c.rx_frame_err)<line_sep>eq_(self.rx_over_err['val'] self.c.rx_over_err)<line_sep>eq_(self.rx_crc_err['val'] self.c.rx_crc_err)<line_sep>eq_(self.collisions['val'] self.c.collisions)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.port_no['val'] res.port_no)<line_sep>eq_(self.rx_packets['val'] res.rx_packets)<line_sep>eq_(self.tx_packets['val'] res.tx_packets)<line_sep>eq_(self.rx_bytes['val'] res.rx_bytes)<line_sep>eq_(self.tx_bytes['val'] res.tx_bytes)<line_sep>eq_(self.rx_dropped['val'] res.rx_dropped)<line_sep>eq_(self.tx_dropped['val'] res.tx_dropped)<line_sep>eq_(self.rx_errors['val'] res.rx_errors)<line_sep>eq_(self.tx_errors['val'] res.tx_errors)<line_sep>eq_(self.rx_frame_err['val'] res.rx_frame_err)<line_sep>eq_(self.rx_over_err['val'] res.rx_over_err)<line_sep>eq_(self.rx_crc_err['val'] res.rx_crc_err)<line_sep>eq_(self.collisions['val'] res.collisions)<block_end><block_end><class_stmt>TestOFPQueueStats(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPQueueStats """<line_sep># OFP_QUEUE_STATS_PACK_STR # 
'!H2xIQQQ'...port_no, zfill, queue_id, tx_bytes, tx_packets, tx_errors port_no={'buf':'\xe7\x6b' 'val':59243}<line_sep>zfill='\x00'<times>2<line_sep>queue_id={'buf':'\x2a\xa8\x7f\x32' 'val':715685682}<line_sep>tx_bytes={'buf':'\x77\xe1\xd5\x63\x18\xae\x63\xaa' 'val':8638420181865882538}<line_sep>tx_packets={'buf':'\x27\xa4\x41\xd7\xd4\x53\x9e\x42' 'val':2856480458895760962}<line_sep>tx_errors={'buf':'\x57\x32\x08\x2f\x88\x32\x40\x6b' 'val':6283093430376743019}<line_sep>c=OFPQueueStats(port_no['val'] queue_id['val'] tx_bytes['val'] tx_packets['val'] tx_errors['val'])<line_sep>buf=port_no['buf']+zfill+queue_id['buf']+tx_bytes['buf']+tx_packets['buf']+tx_errors['buf']<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.port_no['val'] self.c.port_no)<line_sep>eq_(self.queue_id['val'] self.c.queue_id)<line_sep>eq_(self.tx_bytes['val'] self.c.tx_bytes)<line_sep>eq_(self.tx_packets['val'] self.c.tx_packets)<line_sep>eq_(self.tx_errors['val'] self.c.tx_errors)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.port_no['val'] res.port_no)<line_sep>eq_(self.queue_id['val'] res.queue_id)<line_sep>eq_(self.tx_bytes['val'] res.tx_bytes)<line_sep>eq_(self.tx_packets['val'] res.tx_packets)<line_sep>eq_(self.tx_errors['val'] res.tx_errors)<block_end><block_end><class_stmt>TestOFPVendorStats(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPVendorStats """<line_sep>specific_data='specific_data'<line_sep>specific_data_after='data'<line_sep>offset=specific_data.find(specific_data_after)<line_sep>c=OFPVendorStats(specific_data)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.specific_data self.c.specific_data)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.specific_data self.offset)<line_sep>eq_(self.specific_data_after res.specific_data)<block_end><block_end><class_stmt>TestOFPQueuePropNone(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPQueuePropNone """<line_sep># OFP_QUEUE_PROP_HEADER_PACK_STR # '!HH4x'...property_, len_ property={'buf':'\x00\x00' 'val':ofproto.OFPQT_NONE}<line_sep>len={'buf':'\x00\x08' 'val':ofproto.OFP_QUEUE_PROP_HEADER_SIZE}<line_sep>zfill='\x00'<times>4<line_sep>c=OFPQueuePropNone()<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>cls=OFPQueuePropHeader._QUEUE_PROPERTIES[self.c.cls_prop_type]<line_sep>eq_(self.property['val'] self.c.cls_prop_type)<line_sep>eq_(self.property['val'] self.c.property)<line_sep>eq_(self.property['val'] cls.cls_prop_type)<line_sep>eq_(self.len['val'] self.c.cls_prop_len)<line_sep>eq_(self.len['val'] self.c.len)<line_sep>eq_(self.len['val'] cls.cls_prop_len)<block_end><def_stmt>test_parser self<block_start>buf=self.property['buf']+self.len['buf']+self.zfill<line_sep>ok_(self.c.parser(buf 0))<block_end><block_end><class_stmt>TestOFPQueuePropMinRate(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPQueuePropMinRate """<line_sep># OFP_QUEUE_PROP_MIN_RATE_PACK_STR # '!H6x'...rate rate={'buf':'\x00\x01' 'val':ofproto.OFPQT_MIN_RATE}<line_sep>len={'buf':'\x00\x10' 'val':ofproto.OFP_QUEUE_PROP_MIN_RATE_SIZE}<line_sep>zfill='\x00'<times>6<line_sep>buf=rate['buf']+zfill<line_sep>c=OFPQueuePropMinRate(rate['val'])<def_stmt>setUp
self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>cls=OFPQueuePropHeader._QUEUE_PROPERTIES[self.c.cls_prop_type]<line_sep>eq_(self.rate['val'] self.c.cls_prop_type)<line_sep>eq_(self.rate['val'] self.c.rate)<line_sep>eq_(self.rate['val'] cls.cls_prop_type)<line_sep>eq_(self.len['val'] self.c.cls_prop_len)<line_sep>eq_(self.len['val'] cls.cls_prop_len)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.rate['val'] res.rate)<block_end><block_end><class_stmt>TestOFPPacketQueue(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPPacketQueue """<line_sep># OFP_PACKET_QUEUE_PQCK_STR # '!IH2x'...queue_id, len_, zfill queue_id={'buf':'\x4d\x4b\x3a\xd1' 'val':1296775889}<line_sep>len_={'buf':'\x00\x08' 'val':ofproto.OFP_QUEUE_PROP_HEADER_SIZE}<line_sep>zfill='\x00'<times>2<line_sep>buf=queue_id['buf']+len_['buf']+zfill<line_sep>c=OFPPacketQueue(queue_id['val'] len_['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.queue_id['val'] self.c.queue_id)<line_sep>eq_(self.len_['val'] self.c.len)<block_end><def_stmt>test_parser self<block_start>res=self.c.parser(self.buf 0)<line_sep>eq_(self.queue_id['val'] res.queue_id)<line_sep>eq_(self.len_['val'] res.len)<block_end><def_stmt>test_parser_append_prop self# OFP_QUEUE_PROP_HEADER_PACK_STR + OFP_QUEUE_PROP_MIN_RATE_PACK_STR # '!HH4xH6x'...type, len, zfill, rate, zfill <block_start>len_={'buf':'\x00\x10' 'val':ofproto.OFP_QUEUE_PROP_MIN_RATE_SIZE}<line_sep>a_type={'buf':'\x00\x01' 'val':ofproto.OFPQT_MIN_RATE}<line_sep>a_len={'buf':'\x00\x10' 'val':ofproto.OFP_QUEUE_PROP_MIN_RATE_SIZE}<line_sep>a_zfill0='\x00'<times>4<line_sep>a_rate={'buf':'\x00\x01' 'val':ofproto.OFPQT_MIN_RATE}<line_sep>a_zfill1='\x00'<times>6<line_sep>buf=self.queue_id['buf']+len_['buf']+self.zfill+a_type['buf']+a_len['buf']+a_zfill0+a_rate['buf']+a_zfill1<line_sep>res=self.c.parser(buf 0)<line_sep>eq_(self.queue_id['val'] res.queue_id)<line_sep>eq_(len_['val'] res.len)<line_sep>append_cls=res.properties[0]<line_sep>eq_(a_type['val'] append_cls.property)<line_sep>eq_(a_len['val'] append_cls.len)<line_sep>eq_(a_rate['val'] append_cls.rate)<block_end><block_end><class_stmt>TestOFPHello(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPHello """<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version=ofproto.OFP_VERSION<line_sep>msg_type=ofproto.OFPT_HELLO<line_sep>msg_len=ofproto.OFP_HEADER_SIZE<line_sep>xid=2183948390<line_sep>data='\x00\x01\x02\x03'<line_sep>fmt=ofproto.OFP_HEADER_PACK_STR<line_sep>buf=struct.pack(fmt version msg_type msg_len xid)+data<line_sep>res=OFPHello.parser(object version msg_type msg_len xid bytearray(buf))<line_sep>eq_(version res.version)<line_sep>eq_(msg_type res.msg_type)<line_sep>eq_(msg_len res.msg_len)<line_sep>eq_(xid res.xid)<line_sep>eq_(buffer(buf) res.buf)<block_end><def_stmt>test_serialize self<block_start><class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPHello(Datapath)<line_sep>c.serialize()<line_sep>eq_(ofproto.OFP_VERSION c.version)<line_sep>eq_(ofproto.OFPT_HELLO c.msg_type)<line_sep>eq_(0 
c.xid)<block_end><block_end><class_stmt>TestOFPErrorMsg(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPErrorMsg """<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x01' 'val':ofproto.OFPT_ERROR}<line_sep>msg_len={'buf':'\x00\x0c' 'val':ofproto.OFP_ERROR_MSG_SIZE}<line_sep>xid={'buf':'\x87\x8b\x26\x7c' 'val':2274043516}<line_sep>type={'buf':'\xab\x3e' 'val':43838}<line_sep>code={'buf':'\x5d\x3c' 'val':23868}<line_sep>data='Error Message.'<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']+type['buf']+code['buf']+data<line_sep>res=OFPErrorMsg.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(type['val'] res.type)<line_sep>eq_(code['val'] res.code)<line_sep>eq_(data res.data)<block_end><def_stmt>test_serialize self<block_start><class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>type=1306<line_sep>code=13774<line_sep>data='Error Message.'<line_sep>c=OFPErrorMsg(Datapath)<line_sep>c.type=type<line_sep>c.code=code<line_sep>c.data=data<line_sep>c.serialize()<line_sep>eq_(ofproto.OFP_VERSION c.version)<line_sep>eq_(ofproto.OFPT_ERROR c.msg_type)<line_sep>eq_(0 c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.OFP_ERROR_MSG_PACK_STR.replace('!' '')+str(len(data))+'s'<line_sep>res=struct.unpack(fmt str(c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_ERROR res[1])<line_sep>eq_(len(c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(type res[4])<line_sep>eq_(code res[5])<line_sep>eq_(data res[6])<block_end><block_end><class_stmt>TestOFPEchoRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPEchoRequest """<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x02' 'val':ofproto.OFPT_ECHO_REQUEST}<line_sep>msg_len={'buf':'\x00\x08' 'val':ofproto.OFP_HEADER_SIZE}<line_sep>xid={'buf':'\x84\x47\xef\x3f' 'val':2219306815}<line_sep>data='Request Message.'<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']+data<line_sep>res=OFPEchoRequest.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(data res.data)<block_end><def_stmt>test_serialize self<block_start><class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>data='Request Message.'<line_sep>c=OFPEchoRequest(Datapath)<line_sep>c.data=data<line_sep>c.serialize()<line_sep>eq_(ofproto.OFP_VERSION c.version)<line_sep>eq_(ofproto.OFPT_ECHO_REQUEST c.msg_type)<line_sep>eq_(0 c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' 
'')+str(len(data))+'s'<line_sep>res=struct.unpack(fmt str(c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_ECHO_REQUEST res[1])<line_sep>eq_(len(c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(data res[4])<block_end><block_end><class_stmt>TestOFPEchoReply(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPEchoReply """<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x03' 'val':ofproto.OFPT_ECHO_REPLY}<line_sep>msg_len={'buf':'\x00\x08' 'val':ofproto.OFP_HEADER_SIZE}<line_sep>xid={'buf':'\x6e\x21\x3e\x62' 'val':1847672418}<line_sep>data='Reply Message.'<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']+data<line_sep>res=OFPEchoReply.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(data res.data)<block_end><def_stmt>test_serialize self<block_start><class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>data='Reply Message.'<line_sep>c=OFPEchoReply(Datapath)<line_sep>c.data=data<line_sep>c.serialize()<line_sep>eq_(ofproto.OFP_VERSION c.version)<line_sep>eq_(ofproto.OFPT_ECHO_REPLY c.msg_type)<line_sep>eq_(0 c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+str(len(data))+'s'<line_sep>res=struct.unpack(fmt str(c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_ECHO_REPLY res[1])<line_sep>eq_(len(c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(data res[4])<block_end><block_end><class_stmt>TestOFPVendor(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPVendor """<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x04' 'val':ofproto.OFPT_VENDOR}<line_sep>msg_len={'buf':'\x00\x0c' 'val':ofproto.OFP_VENDOR_HEADER_SIZE}<line_sep>xid={'buf':'\x05\x45\xdf\x18' 'val':88465176}<line_sep>vendor={'buf':'\x53\xea\x25\x3e' 'val':1407853886}<line_sep>data='Vendor Message.'<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']+vendor['buf']+data<line_sep>res=OFPVendor.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(vendor['val'] res.vendor)<line_sep>eq_(data res.data)<block_end><def_stmt>test_serialize self<block_start><class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>vendor={'buf':'\x38\x4b\xf9\x6c' 'val':944503148}<line_sep>data='Reply Message.'<line_sep>c=OFPVendor(Datapath)<line_sep>c.vendor=vendor['val']<line_sep>c.data=data<line_sep>c.serialize()<line_sep>eq_(ofproto.OFP_VERSION c.version)<line_sep>eq_(ofproto.OFPT_VENDOR c.msg_type)<line_sep>eq_(0 c.xid)<line_sep>eq_(vendor['val'] c.vendor)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' 
'')+ofproto.OFP_VENDOR_HEADER_PACK_STR.replace('!' '')+str(len(data))+'s'<line_sep>res=struct.unpack(fmt str(c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_VENDOR res[1])<line_sep>eq_(len(c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(vendor['val'] res[4])<line_sep>eq_(data res[5])<block_end><block_end># class TestNXTRequest(unittest.TestCase): <class_stmt>TestNiciraHeader(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NiciraHeader """<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>subtype=ofproto.NXT_FLOW_MOD_TABLE_ID<line_sep>c=NiciraHeader(object subtype)<line_sep>eq_(subtype c.subtype)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start><class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>data='Reply Message.'<line_sep>subtype=ofproto.NXT_FLOW_MOD_TABLE_ID<line_sep>c=NiciraHeader(Datapath subtype)<line_sep>c.data=data<line_sep>c.serialize()<line_sep>eq_(ofproto.OFP_VERSION c.version)<line_sep>eq_(ofproto.OFPT_VENDOR c.msg_type)<line_sep>eq_(0 c.xid)<line_sep>eq_(ofproto.NX_VENDOR_ID c.vendor)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.NICIRA_HEADER_PACK_STR.replace('!' '')+str(len(data))+'s'<line_sep>res=struct.unpack(fmt str(c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_VENDOR res[1])<line_sep>eq_(len(c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(ofproto.NX_VENDOR_ID res[4])<line_sep>eq_(subtype res[5])<line_sep>eq_(data res[6])<block_end><block_end><class_stmt>TestNXTSetFlowFormat(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXTSetFlowFormat """<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>flow_format={'buf':'\xdc\x6b\xf5\x24' 'val':3698062628}<line_sep>c=NXTSetFlowFormat(object flow_format['val'])<line_sep>eq_(flow_format['val'] c.format)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start><class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>flow_format={'buf':'\x5a\x4e\x59\xad' 'val':1515084205}<line_sep>c=NXTSetFlowFormat(Datapath flow_format['val'])<line_sep>c.serialize()<line_sep>eq_(ofproto.OFP_VERSION c.version)<line_sep>eq_(ofproto.OFPT_VENDOR c.msg_type)<line_sep>eq_(0 c.xid)<line_sep>eq_(ofproto.NX_VENDOR_ID c.vendor)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.NICIRA_HEADER_PACK_STR.replace('!' '')+ofproto.NX_SET_FLOW_FORMAT_PACK_STR.replace('!' 
'')<line_sep>res=struct.unpack(fmt str(c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_VENDOR res[1])<line_sep>eq_(len(c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(ofproto.NX_VENDOR_ID res[4])<line_sep>eq_(ofproto.NXT_SET_FLOW_FORMAT res[5])<line_sep>eq_(flow_format['val'] res[6])<block_end><block_end><class_stmt>TestNXTFlowMod(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXTFlowMod """<line_sep># NX_FLOW_MOD_PACK_STR # '!Q4HI3H6x'...cookie, command, idle_timeout, hard_timeout, # priority, buffer_id, out_port, flags, rule, zfill cookie={'buf':'\x04\x56\x27\xad\xbd\x43\xd6\x83' 'val':312480851306993283}<line_sep>command={'buf':'\x61\xaa' 'val':25002}<line_sep>idle_timeout={'buf':'\x4e\xff' 'val':20223}<line_sep>hard_timeout={'buf':'\x80\x16' 'val':32790}<line_sep>priority={'buf':'\x70\x5f' 'val':28767}<line_sep>buffer_id={'buf':'\x7b\x97\x3a\x09' 'val':2073508361}<line_sep>out_port={'buf':'\x11\x7d' 'val':4477}<line_sep>flags={'buf':'\x5c\xb9' 'val':23737}<line_sep>rule=nx_match.ClsRule()<line_sep>zfill='\x00'<times>6<line_sep>port={'buf':'\x2a\xe0' 'val':10976}<line_sep>actions=[OFPActionOutput(port['val'])]<def_stmt>_get_obj self append_action=<false><block_start><class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>actions=<none><if_stmt>append_action<block_start>actions=self.actions<block_end>c=NXTFlowMod(Datapath self.cookie['val'] self.command['val'] self.idle_timeout['val'] self.hard_timeout['val'] self.priority['val'] self.buffer_id['val'] self.out_port['val'] self.flags['val'] self.rule actions)<line_sep><return>c<block_end><def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>c=self._get_obj()<line_sep>eq_(self.cookie['val'] c.cookie)<line_sep>eq_(self.command['val'] c.command)<line_sep>eq_(self.idle_timeout['val'] c.idle_timeout)<line_sep>eq_(self.hard_timeout['val'] c.hard_timeout)<line_sep>eq_(self.priority['val'] c.priority)<line_sep>eq_(self.buffer_id['val'] c.buffer_id)<line_sep>eq_(self.out_port['val'] c.out_port)<line_sep>eq_(self.flags['val'] c.flags)<line_sep>eq_(self.rule.__hash__() c.rule.__hash__())<block_end><def_stmt>test_init_append_actions self<block_start>c=self._get_obj(<true>)<line_sep>action=c.actions[0]<line_sep>eq_(ofproto.OFPAT_OUTPUT action.type)<line_sep>eq_(ofproto.OFP_ACTION_OUTPUT_SIZE action.len)<line_sep>eq_(self.port['val'] action.port)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>c=self._get_obj()<line_sep>c.serialize()<line_sep>eq_(ofproto.OFP_VERSION c.version)<line_sep>eq_(ofproto.OFPT_VENDOR c.msg_type)<line_sep>eq_(0 c.xid)<line_sep>eq_(ofproto.NX_VENDOR_ID c.vendor)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.NICIRA_HEADER_PACK_STR.replace('!' '')+ofproto.NX_FLOW_MOD_PACK_STR.replace('!'
'')<line_sep>res=struct.unpack(fmt str(c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_VENDOR res[1])<line_sep>eq_(len(c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(ofproto.NX_VENDOR_ID res[4])<line_sep>eq_(ofproto.NXT_FLOW_MOD res[5])<line_sep>eq_(self.cookie['val'] res[6])<line_sep>eq_(self.command['val'] res[7])<line_sep>eq_(self.idle_timeout['val'] res[8])<line_sep>eq_(self.hard_timeout['val'] res[9])<line_sep>eq_(self.priority['val'] res[10])<line_sep>eq_(self.buffer_id['val'] res[11])<line_sep>eq_(self.out_port['val'] res[12])<line_sep>eq_(self.flags['val'] res[13])<block_end><def_stmt>test_serialize_append_actions self<block_start>c=self._get_obj(<true>)<line_sep>c.serialize()<line_sep>eq_(ofproto.OFP_VERSION c.version)<line_sep>eq_(ofproto.OFPT_VENDOR c.msg_type)<line_sep>eq_(0 c.xid)<line_sep>eq_(ofproto.NX_VENDOR_ID c.vendor)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.NICIRA_HEADER_PACK_STR.replace('!' '')+ofproto.NX_FLOW_MOD_PACK_STR.replace('!' '')+ofproto.OFP_ACTION_OUTPUT_PACK_STR.replace('!' '')<line_sep>res=struct.unpack(fmt str(c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_VENDOR res[1])<line_sep>eq_(len(c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(ofproto.NX_VENDOR_ID res[4])<line_sep>eq_(ofproto.NXT_FLOW_MOD res[5])<line_sep>eq_(self.cookie['val'] res[6])<line_sep>eq_(self.command['val'] res[7])<line_sep>eq_(self.idle_timeout['val'] res[8])<line_sep>eq_(self.hard_timeout['val'] res[9])<line_sep>eq_(self.priority['val'] res[10])<line_sep>eq_(self.buffer_id['val'] res[11])<line_sep>eq_(self.out_port['val'] res[12])<line_sep>eq_(self.flags['val'] res[13])<line_sep># action eq_(0 res[14])<line_sep>eq_(ofproto.OFPAT_OUTPUT res[15])<line_sep>eq_(ofproto.OFP_ACTION_OUTPUT_SIZE res[16])<line_sep>eq_(self.port['val'] res[17])<line_sep>eq_(0xffe5 res[18])<block_end><block_end><class_stmt>TestNXTRoleRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXTRoleRequest """<line_sep># NX_ROLE_PACK_STR # '!I'...role role={'buf':'\x62\x81\x27\x61' 'val':1652631393}<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=NXTRoleRequest(Datapath role['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.role['val'] self.c.role)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_VENDOR self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>eq_(ofproto.NX_VENDOR_ID self.c.vendor)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.NICIRA_HEADER_PACK_STR.replace('!' '')+ofproto.NX_ROLE_PACK_STR.replace('!' 
'')<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_VENDOR res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(ofproto.NX_VENDOR_ID res[4])<line_sep>eq_(ofproto.NXT_ROLE_REQUEST res[5])<line_sep>eq_(self.role['val'] res[6])<block_end><block_end><class_stmt>TestNXTFlowModTableId(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.NXTFlowModTableId """<line_sep># NX_FLOW_MOD_TABLE_ID_PACK_STR # '!B7x'...set_, zfill set_={'buf':'\x71' 'val':113}<line_sep>zfill='\x00'<times>7<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=NXTFlowModTableId(Datapath set_['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.set_['val'] self.c.set)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_VENDOR self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>eq_(ofproto.NX_VENDOR_ID self.c.vendor)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.NICIRA_HEADER_PACK_STR.replace('!' '')+ofproto.NX_FLOW_MOD_TABLE_ID_PACK_STR.replace('!' '')<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_VENDOR res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(ofproto.NX_VENDOR_ID res[4])<line_sep>eq_(ofproto.NXT_FLOW_MOD_TABLE_ID res[5])<line_sep>eq_(self.set_['val'] res[6])<block_end><block_end><class_stmt>TestOFPSwitchFeatures(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPSwitchFeatures """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPSwitchFeatures(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x06' 'val':ofproto.OFPT_FEATURES_REPLY}<line_sep>msg_len_val=ofproto.OFP_SWITCH_FEATURES_SIZE+ofproto.OFP_PHY_PORT_SIZE<line_sep>msg_len={'buf':'\x00\x4c' 'val':msg_len_val}<line_sep>xid={'buf':'\xcc\x0a\x41\xd4' 'val':3423224276}<line_sep># OFP_SWITCH_FEATURES_PACK_STR # '!QIB3xII'...datapath_id, n_buffers, n_tables, # zfill, capabilities, actions datapath_id={'buf':'\x11\xa3\x72\x63\x61\xde\x39\x81' 'val':1270985291017894273}<line_sep>n_buffers={'buf':'\x80\x14\xd7\xf6' 'val':2148849654}<line_sep>n_tables={'buf':'\xe4' 'val':228}<line_sep>zfill='\x00'<times>3<line_sep>capabilities={'buf':'\x69\x4f\xe4\xc2' 'val':1766843586}<line_sep>actions={'buf':'\x78\x06\xd9\x0c' 'val':2013714700}<line_sep># OFP_PHY_PORT_PACK_STR # '!H6s16sIIIIII'... 
port_no, hw_addr, name, config, state # curr, advertised, supported, peer port_no={'buf':'\xe7\x6b' 'val':59243}<line_sep>hw_addr='3c:d1:2b:8d:3f:d6'<line_sep>name='name'.ljust(16)<line_sep>config={'buf':'\x84\xb6\x8c\x53' 'val':2226555987}<line_sep>state={'buf':'\x64\x07\xfb\xc9' 'val':1678244809}<line_sep>curr={'buf':'\xa9\xe8\x0a\x2b' 'val':2850556459}<line_sep>advertised={'buf':'\x78\xb9\x7b\x72' 'val':2025421682}<line_sep>supported={'buf':'\x7e\x65\x68\xad' 'val':2120575149}<line_sep>peer={'buf':'\xa4\x5b\x8b\xed' 'val':2757463021}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']+datapath_id['buf']+n_buffers['buf']+n_tables['buf']+zfill+capabilities['buf']+actions['buf']+port_no['buf']+addrconv.mac.text_to_bin(hw_addr)+name+config['buf']+state['buf']+curr['buf']+advertised['buf']+supported['buf']+peer['buf']<line_sep>res=OFPSwitchFeatures.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(datapath_id['val'] res.datapath_id)<line_sep>eq_(n_buffers['val'] res.n_buffers)<line_sep>eq_(n_tables['val'] res.n_tables)<line_sep>eq_(capabilities['val'] res.capabilities)<line_sep>eq_(actions['val'] res.actions)<line_sep># port port=res.ports[port_no['val']]<line_sep>eq_(port_no['val'] port.port_no)<line_sep>eq_(hw_addr port.hw_addr)<line_sep>eq_(name port.name)<line_sep>eq_(config['val'] port.config)<line_sep>eq_(state['val'] port.state)<line_sep>eq_(curr['val'] port.curr)<line_sep>eq_(advertised['val'] port.advertised)<line_sep>eq_(supported['val'] port.supported)<line_sep>eq_(peer['val'] port.peer)<block_end><def_stmt>test_serialize self# Not used.
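# OFPSwitchFeatures (OFPT_FEATURES_REPLY) is only received from the switch, so the serializer is never exercised by the controller.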
<block_start><pass><block_end><block_end><class_stmt>TestOFPPortStatus(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPPortStatus """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPPortStatus(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x0c' 'val':ofproto.OFPT_PORT_STATUS}<line_sep>msg_len={'buf':'\x00\x40' 'val':ofproto.OFP_PORT_STATUS_SIZE}<line_sep>xid={'buf':'\x06\x27\x8b\x7b' 'val':103254907}<line_sep># OFP_PORT_STATUS_PACK_STR # '!B7xH6s16sIIIIII'...reason, zfill, port_no, hw_addr, # name, config, state, curr, # advertised, supported, peer reason={'buf':'\x71' 'val':113}<line_sep>zfill='\x00'<times>7<line_sep>port_no={'buf':'\x48\xd8' 'val':18648}<line_sep>hw_addr='41:f7:a3:52:8f:6b'<line_sep>name='name'.ljust(16)<line_sep>config={'buf':'\xae\x73\x90\xec' 'val':2926809324}<line_sep>state={'buf':'\x41\x37\x32\x1d' 'val':1094136349}<line_sep>curr={'buf':'\xa9\x47\x13\x2c' 'val':2840007468}<line_sep>advertised={'buf':'\xce\x6b\x4a\x87' 'val':3463137927}<line_sep>supported={'buf':'\xb8\x06\x65\xa1' 'val':3087426977}<line_sep>peer={'buf':'\x6a\x11\x52\x39' 'val':1779520057}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']+reason['buf']+zfill+port_no['buf']+addrconv.mac.text_to_bin(hw_addr)+name+config['buf']+state['buf']+curr['buf']+advertised['buf']+supported['buf']+peer['buf']<line_sep>res=OFPPortStatus.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(reason['val'] res.reason)<line_sep># desc desc=res.desc<line_sep>eq_(port_no['val'] desc.port_no)<line_sep>eq_(hw_addr desc.hw_addr)<line_sep>eq_(name desc.name)<line_sep>eq_(config['val'] desc.config)<line_sep>eq_(state['val'] desc.state)<line_sep>eq_(curr['val'] desc.curr)<line_sep>eq_(advertised['val'] desc.advertised)<line_sep>eq_(supported['val'] desc.supported)<line_sep>eq_(peer['val'] desc.peer)<block_end><def_stmt>test_serialize self# Not used. 
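# OFPPortStatus is an asynchronous notification from the switch; parsing is the only code path the controller uses.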
<block_start><pass><block_end><block_end><class_stmt>TestOFPPacketIn(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPPacketIn """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPPacketIn(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>_test_parser self padding=<false><block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x0a' 'val':ofproto.OFPT_PACKET_IN}<line_sep>msg_len={'buf':'\x00\x14' 'val':ofproto.OFP_PACKET_IN_SIZE}<line_sep>xid={'buf':'\xd0\x23\x8c\x34' 'val':3491990580}<line_sep># OFP_PACKET_IN_PACK_STR # '!IHHBx2x'...buffer_id, total_len, # in_port, reason, zfill, data buffer_id={'buf':'\xae\x73\x90\xec' 'val':2926809324}<line_sep>total_len={'buf':'\x00\x10' 'val':16}<line_sep>in_port={'buf':'\x08\x42' 'val':2114}<line_sep>reason={'buf':'\x43' 'val':67}<line_sep>zfill='\x00'<times>1<if_stmt>padding<block_start>data='PACKET IN'.ljust(20)<block_end><else_stmt><block_start>data='PACKET IN'.ljust(16)<block_end>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']+buffer_id['buf']+total_len['buf']+in_port['buf']+reason['buf']+zfill+data<line_sep>res=OFPPacketIn.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(buffer_id['val'] res.buffer_id)<line_sep>eq_(total_len['val'] res.total_len)<line_sep>eq_(in_port['val'] res.in_port)<line_sep>eq_(reason['val'] res.reason)<line_sep>eq_(data[0:16] res.data)<line_sep><return><true><block_end><def_stmt>test_parser self<block_start>ok_(self._test_parser())<block_end><def_stmt>test_parser_padding self<block_start>ok_(self._test_parser(<true>))<block_end><def_stmt>test_serialize self# Not used. <block_start><pass><block_end><block_end><class_stmt>TestOFPGetConfigReply(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPGetConfigReply """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPGetConfigReply(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x0a' 'val':ofproto.OFPT_GET_CONFIG_REPLY}<line_sep>msg_len={'buf':'\x00\x14' 'val':ofproto.OFP_SWITCH_CONFIG_SIZE}<line_sep>xid={'buf':'\x94\xc4\xd2\xcd' 'val':2495926989}<line_sep># OFP_SWITCH_CONFIG_PACK_STR # '!HH'...flags, miss_send_len flags={'buf':'\xa0\xe2' 'val':41186}<line_sep>miss_send_len={'buf':'\x36\x0e' 'val':13838}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']+flags['buf']+miss_send_len['buf']<line_sep>res=OFPGetConfigReply.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(flags['val'] res.flags)<line_sep>eq_(miss_send_len['val'] res.miss_send_len)<block_end><def_stmt>test_serialize self# Not used. 
<block_start><pass><block_end><block_end><class_stmt>TestOFPBarrierReply(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPBarrierReply """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPBarrierReply(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x13' 'val':ofproto.OFPT_BARRIER_REPLY}<line_sep>msg_len={'buf':'\x00\x08' 'val':ofproto.OFP_HEADER_SIZE}<line_sep>xid={'buf':'\x66\xc4\xc3\xac' 'val':1724171180}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']<line_sep>res=OFPBarrierReply.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<block_end><def_stmt>test_serialize self# Not used. <block_start><pass><block_end><block_end><class_stmt>TestOFPFlowRemoved(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPFlowRemoved """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPFlowRemoved(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x0a' 'val':ofproto.OFPT_FLOW_REMOVED}<line_sep>msg_len={'buf':'\x00\x14' 'val':ofproto.OFP_FLOW_REMOVED_SIZE}<line_sep>xid={'buf':'\x94\xc4\xd2\xcd' 'val':2495926989}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']<line_sep># OFP_MATCH_PACK_STR # '!IH6s6sHBxHBB2xIIHH'...wildcards, in_port, dl_src, dl_dst, dl_vlan, # dl_vlan_pcp, dl_type, nw_tos, nw_proto, # nw_src, nw_dst, tp_src, tp_dst wildcards={'buf':'\xd2\x71\x25\x23' 'val':3530630435}<line_sep>in_port={'buf':'\x37\x8b' 'val':14219}<line_sep>dl_src='\x7f\x85\xc4\x70\x12\xda'<line_sep>dl_dst='\x0a\x51\x17\x58\xb0\xbb'<line_sep>dl_vlan={'buf':'\xc1\xf9' 'val':49657}<line_sep>dl_vlan_pcp={'buf':'\x79' 'val':121}<line_sep>zfill0='\x00'<line_sep>dl_type={'buf':'\xa6\x9e' 'val':42654}<line_sep>nw_tos={'buf':'\xde' 'val':222}<line_sep>nw_proto={'buf':'\xe5' 'val':229}<line_sep>zfil11='\x00'<times>2<line_sep>nw_src={'buf':'\x1b\x6d\x8d\x4b' 'val':460164427}<line_sep>nw_dst={'buf':'\xab\x25\xe1\x20' 'val':2871386400}<line_sep>tp_src={'buf':'\xd5\xc3' 'val':54723}<line_sep>tp_dst={'buf':'\x78\xb9' 'val':30905}<line_sep>buf<augadd>wildcards['buf']+in_port['buf']+dl_src+dl_dst+dl_vlan['buf']+dl_vlan_pcp['buf']+zfill0+dl_type['buf']+nw_tos['buf']+nw_proto['buf']+zfil11+nw_src['buf']+nw_dst['buf']+tp_src['buf']+tp_dst['buf']<line_sep># OFP_FLOW_REMOVED_PACK_STR0 # '!QHBxIIH2xQQ'...cookie, priority, reason, zfill, # duration_sec, duration_nsec, idle_timeout, # zfill, packet_count, byte_count cookie={'buf':'\x02\x79\xba\x00\xef\xab\xee\x44' 'val':178378173441633860}<line_sep>priority={'buf':'\x02\xce' 'val':718}<line_sep>reason={'buf':'\xa9' 'val':169}<line_sep>zfill0='\x00'<times>1<line_sep>duration_sec={'buf':'\x86\x24\xa3\xba' 'val':2250548154}<line_sep>duration_nsec={'buf':'\x94\x94\xc2\x23' 
'val':2492776995}<line_sep>idle_timeout={'buf':'\xeb\x7c' 'val':60284}<line_sep>zfill1='\x00'<times>2<line_sep>packet_count={'buf':'\x5a\x0d\xf2\x03\x8e\x0a\xbb\x8d' 'val':6489108735192644493}<line_sep>byte_count={'buf':'\x65\xc8\xd3\x72\x51\xb5\xbb\x7c' 'val':7334344481123449724}<line_sep>buf<augadd>cookie['buf']+priority['buf']+reason['buf']+zfill0+duration_sec['buf']+duration_nsec['buf']+idle_timeout['buf']+zfill1+packet_count['buf']+byte_count['buf']<line_sep>res=OFPFlowRemoved.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(cookie['val'] res.cookie)<line_sep>eq_(priority['val'] res.priority)<line_sep>eq_(reason['val'] res.reason)<line_sep>eq_(duration_sec['val'] res.duration_sec)<line_sep>eq_(duration_nsec['val'] res.duration_nsec)<line_sep>eq_(idle_timeout['val'] res.idle_timeout)<line_sep>eq_(packet_count['val'] res.packet_count)<line_sep>eq_(byte_count['val'] res.byte_count)<line_sep># match match=res.match<line_sep>eq_(wildcards['val'] match.wildcards)<line_sep>eq_(in_port['val'] match.in_port)<line_sep>eq_(dl_src match.dl_src)<line_sep>eq_(dl_dst match.dl_dst)<line_sep>eq_(dl_vlan['val'] match.dl_vlan)<line_sep>eq_(dl_vlan_pcp['val'] match.dl_vlan_pcp)<line_sep>eq_(dl_type['val'] match.dl_type)<line_sep>eq_(nw_tos['val'] match.nw_tos)<line_sep>eq_(nw_proto['val'] match.nw_proto)<line_sep>eq_(nw_src['val'] match.nw_src)<line_sep>eq_(nw_dst['val'] match.nw_dst)<line_sep>eq_(tp_src['val'] match.tp_src)<line_sep>eq_(tp_dst['val'] match.tp_dst)<block_end><def_stmt>test_serialize self# Not used. <block_start><pass><block_end><block_end><class_stmt>TestOFPQueueGetConfigReply(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPQueueGetConfigReply """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPQueueGetConfigReply(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x0a' 'val':ofproto.OFPT_QUEUE_GET_CONFIG_REPLY}<line_sep>msg_len_val=ofproto.OFP_QUEUE_GET_CONFIG_REPLY_SIZE+ofproto.OFP_PACKET_QUEUE_SIZE<line_sep>msg_len={'buf':'\x00\x14' 'val':msg_len_val}<line_sep>xid={'buf':'\x94\xc4\xd2\xcd' 'val':2495926989}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']<line_sep># OFP_QUEUE_GET_CONFIG_REPLY_PACK_STR # '!H6x'...port, zfill port={'buf':'\xfe\x66' 'val':65126}<line_sep>zfill='\x00'<times>6<line_sep>buf<augadd>port['buf']+zfill<line_sep># OFP_PACKET_QUEUE_PQCK_STR # '!IH2x'...queue_id, len_, zfill queue_id={'buf':'\x4d\x4b\x3a\xd1' 'val':1296775889}<line_sep>len_={'buf':'\x00\x08' 'val':ofproto.OFP_QUEUE_PROP_HEADER_SIZE}<line_sep>zfill='\x00'<times>2<line_sep>buf<augadd>queue_id['buf']+len_['buf']+zfill<line_sep>res=OFPQueueGetConfigReply.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(port['val'] res.port)<line_sep># queue queue=res.queues[0]<line_sep>eq_(queue_id['val'] queue.queue_id)<line_sep>eq_(len_['val'] 
queue.len)<block_end><def_stmt>test_serialize self# Not used. <block_start><pass><block_end><block_end><class_stmt>TestOFPDescStatsReply(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPDescStatsReply """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPDescStatsReply(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x11' 'val':ofproto.OFPT_STATS_REPLY}<line_sep>msg_len_val=ofproto.OFP_STATS_MSG_SIZE+ofproto.OFP_DESC_STATS_SIZE<line_sep>msg_len={'buf':'\x04\x38' 'val':msg_len_val}<line_sep>xid={'buf':'\x94\xc4\xd2\xcd' 'val':2495926989}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']<line_sep># OFP_STATS_MSG_PACK_STR # '!HH'...type_, flags type_={'buf':'\x00\x00' 'val':ofproto.OFPST_DESC}<line_sep>flags={'buf':'\x30\xd9' 'val':12505}<line_sep>buf<augadd>type_['buf']+flags['buf']<line_sep># stats_type_cls = OFPDescStats # OFP_DESC_STATS_PACK_STR # '!256s256s256s32s256s'...mfr_desc, hw_desc, sw_desc, # serial_num, dp_desc mfr_desc='mfr_desc'.ljust(256)<line_sep>hw_desc='hw_desc'.ljust(256)<line_sep>sw_desc='sw_desc'.ljust(256)<line_sep>serial_num='serial_num'.ljust(32)<line_sep>dp_desc='dp_desc'.ljust(256)<line_sep>buf<augadd>mfr_desc+hw_desc+sw_desc+serial_num+dp_desc<line_sep>res=OFPDescStatsReply.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(type_['val'] res.type)<line_sep>eq_(flags['val'] res.flags)<line_sep># body body=res.body<line_sep>eq_(mfr_desc body.mfr_desc)<line_sep>eq_(hw_desc body.hw_desc)<line_sep>eq_(sw_desc body.sw_desc)<line_sep>eq_(serial_num body.serial_num)<line_sep>eq_(dp_desc body.dp_desc)<block_end><def_stmt>test_serialize self# Not used. <block_start><pass><block_end><block_end><class_stmt>TestOFPFlowStatsReply(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPFlowStatsReply """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPFlowStatsReply(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x11' 'val':ofproto.OFPT_STATS_REPLY}<line_sep>msg_len_val=ofproto.OFP_STATS_MSG_SIZE+ofproto.OFP_FLOW_STATS_SIZE<line_sep>msg_len={'buf':'\x00\x64' 'val':msg_len_val}<line_sep>xid={'buf':'\x94\xc4\xd2\xcd' 'val':2495926989}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']<line_sep># OFP_STATS_MSG_PACK_STR # '!HH'...type_, flags type_={'buf':'\x00\x01' 'val':ofproto.OFPST_FLOW}<line_sep>flags={'buf':'\x95\xf4' 'val':38388}<line_sep>buf<augadd>type_['buf']+flags['buf']<line_sep># stats_type_cls = OFPFlowStats # OFP_FLOW_STATS_0_PACK_STR # '!HBx'...length, table_id, zfill length={'buf':'\x00\x60' 'val':96}<line_sep>table_id={'buf':'\x51' 'val':81}<line_sep>zfill='\x00'<line_sep>buf<augadd>length['buf']+table_id['buf']+zfill<line_sep># OFP_MATCH_PACK_STR # '!IH6s6sHBxHBB2xIIHH'... 
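# wildcards, in_port, dl_src, dl_dst, dl_vlan, dl_vlan_pcp, # dl_type, nw_tos, nw_proto, nw_src, nw_dst, tp_src, tp_dst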
match='\x97\x7c\xa6\x1e'+'\x5e\xa0'+'\x70\x17\xdc\x80\x59\x9e'+'\x79\xc6\x56\x87\x92\x28'+'\xb1\x81'+'\xbe'+'\x00'+'\x01\xab'+'\x42'+'\xfe'+'\x00\x00'+'\xa4\x5d\x5c\x42'+'\xa2\x5c\x2e\x05'+'\x5a\x94'+'\x64\xd4'<line_sep>buf<augadd>match<line_sep># OFP_FLOW_STATS_1_PACK_STR # '!IIHHH6xQQQ'...duration_sec, duration_nsec, priority, # idle_timeout, hard_timeout, zfill, # cookie, packet_count, byte_count duration_sec={'buf':'\x94\x19\xb3\xd2' 'val':2484712402}<line_sep>duration_nsec={'buf':'\xee\x66\xcf\x7c' 'val':3999715196}<line_sep>priority={'buf':'\xe1\xc0' 'val':57792}<line_sep>idle_timeout={'buf':'\x8e\x10' 'val':36368}<line_sep>hard_timeout={'buf':'\xd4\x99' 'val':54425}<line_sep>zfill='\x00'<times>6<line_sep>cookie={'buf':'\x0b\x01\xe8\xe5\xf0\x84\x8a\xe0' 'val':793171083674290912}<line_sep>packet_count={'buf':'\x47\x5c\xc6\x05\x28\xff\x7c\xdb' 'val':5142202600015232219}<line_sep>byte_count={'buf':'\x24\xe9\x4b\xee\xcb\x57\xd9\xc3' 'val':2659740543924820419}<line_sep>buf<augadd>duration_sec['buf']<line_sep>buf<augadd>duration_nsec['buf']<line_sep>buf<augadd>priority['buf']<line_sep>buf<augadd>idle_timeout['buf']<line_sep>buf<augadd>hard_timeout['buf']<line_sep>buf<augadd>zfill<line_sep>buf<augadd>cookie['buf']<line_sep>buf<augadd>packet_count['buf']<line_sep>buf<augadd>byte_count['buf']<line_sep># <action>_PACK_STR...type_, len_ [others...] type={'buf':'\x00\x00' 'val':ofproto.OFPAT_OUTPUT}<line_sep>len={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_OUTPUT_SIZE}<line_sep>port={'buf':'\x59\x2a' 'val':22826}<line_sep>max_len={'buf':'\x00\x08' 'val':ofproto.OFP_ACTION_OUTPUT_SIZE}<line_sep>buf<augadd>type['buf']+len['buf']+port['buf']+max_len['buf']<line_sep>res=OFPFlowStatsReply.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(type_['val'] res.type)<line_sep>eq_(flags['val'] res.flags)<line_sep># body body=res.body[0]<line_sep>eq_(length['val'] body.length)<line_sep>eq_(table_id['val'] body.table_id)<line_sep>eq_(duration_sec['val'] body.duration_sec)<line_sep>eq_(duration_nsec['val'] body.duration_nsec)<line_sep>eq_(priority['val'] body.priority)<line_sep>eq_(idle_timeout['val'] body.idle_timeout)<line_sep>eq_(hard_timeout['val'] body.hard_timeout)<line_sep>eq_(cookie['val'] body.cookie)<line_sep>eq_(packet_count['val'] body.packet_count)<line_sep>eq_(byte_count['val'] body.byte_count)<line_sep># action action=body.actions[0]<line_sep>eq_(type['val'] action.type)<line_sep>eq_(len['val'] action.len)<line_sep>eq_(port['val'] action.port)<line_sep>eq_(max_len['val'] action.max_len)<block_end><def_stmt>test_serialize self# Not used. 
<block_start><pass><block_end><block_end><class_stmt>TestOFPAggregateStatsReply(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPAggregateStatsReply """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPAggregateStatsReply(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x11' 'val':ofproto.OFPT_STATS_REPLY}<line_sep>msg_len_val=ofproto.OFP_STATS_MSG_SIZE+ofproto.OFP_AGGREGATE_STATS_REPLY_SIZE<line_sep>msg_len={'buf':'\x00\x4c' 'val':msg_len_val}<line_sep>xid={'buf':'\xc6\xd6\xce\x38' 'val':3335966264}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']<line_sep># OFP_STATS_MSG_PACK_STR # '!HH'...type_, flags type_={'buf':'\x00\x02' 'val':ofproto.OFPST_AGGREGATE}<line_sep>flags={'buf':'\x65\x66' 'val':25958}<line_sep>buf<augadd>type_['buf']+flags['buf']<line_sep># stats_type_cls = OFPAggregateStats # OFP_AGGREGATE_STATS_REPLY_PACK_STR # '!QQI4x'...packet_count, byte_count, flow_count, zfill packet_count={'buf':'\x43\x95\x1b\xfb\x0f\xf6\xa7\xdd' 'val':4869829337189623773}<line_sep>byte_count={'buf':'\x36\xda\x2d\x80\x2a\x95\x35\xdd' 'val':3952521651464517085}<line_sep>flow_count={'buf':'\xc3\x0d\xc3\xed' 'val':3272459245}<line_sep>zfill='\x00'<times>4<line_sep>buf<augadd>packet_count['buf']+byte_count['buf']+flow_count['buf']+zfill<line_sep>res=OFPAggregateStatsReply.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(type_['val'] res.type)<line_sep>eq_(flags['val'] res.flags)<line_sep># body body=res.body[0]<line_sep>eq_(packet_count['val'] body.packet_count)<line_sep>eq_(byte_count['val'] body.byte_count)<line_sep>eq_(flow_count['val'] body.flow_count)<block_end><def_stmt>test_serialize self# Not used. 
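# Stats replies originate at the switch, so only parser() is covered for the OFPST_* reply classes.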
<block_start><pass><block_end><block_end><class_stmt>TestOFPTableStatsReply(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPTableStatsReply """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPTableStatsReply(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x11' 'val':ofproto.OFPT_STATS_REPLY}<line_sep>msg_len_val=ofproto.OFP_STATS_MSG_SIZE+ofproto.OFP_TABLE_STATS_SIZE<line_sep>msg_len={'buf':'\x00\x4c' 'val':msg_len_val}<line_sep>xid={'buf':'\xd6\xb4\x8d\xe6' 'val':3602157030}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']<line_sep># OFP_STATS_MSG_PACK_STR # '!HH'...type_, flags type_={'buf':'\x00\x03' 'val':ofproto.OFPST_TABLE}<line_sep>flags={'buf':'\xb3\xf0' 'val':46064}<line_sep>buf<augadd>type_['buf']+flags['buf']<line_sep># stats_type_cls = OFPTableStats # OFP_TABLE_STATS_PACK_STR # '!B3x32sIIIQQ'...table_id, zfill, name, wildcards, max_entries, # active_count, lookup_count, matched_count table_id={'buf':'\x5b' 'val':91}<line_sep>zfill='\x00'<times>3<line_sep>name='name'.ljust(32)<line_sep>wildcards={'buf':'\xc5\xaf\x6e\x12' 'val':3316608530}<line_sep>max_entries={'buf':'\x95\x6c\x78\x4d' 'val':2506913869}<line_sep>active_count={'buf':'\x78\xac\xa8\x1e' 'val':2024581150}<line_sep>lookup_count={'buf':'\x40\x1d\x9c\x39\x19\xec\xd4\x1c' 'val':4620020561814017052}<line_sep>matched_count={'buf':'\x27\x35\x02\xb6\xc5\x5e\x17\x65' 'val':2825167325263435621}<line_sep>buf<augadd>table_id['buf']+zfill+name+wildcards['buf']+max_entries['buf']+active_count['buf']+lookup_count['buf']+matched_count['buf']<line_sep>res=OFPTableStatsReply.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(type_['val'] res.type)<line_sep>eq_(flags['val'] res.flags)<line_sep># body body=res.body[0]<line_sep>eq_(table_id['val'] body.table_id)<line_sep>eq_(name body.name)<line_sep>eq_(wildcards['val'] body.wildcards)<line_sep>eq_(max_entries['val'] body.max_entries)<line_sep>eq_(active_count['val'] body.active_count)<line_sep>eq_(lookup_count['val'] body.lookup_count)<line_sep>eq_(matched_count['val'] body.matched_count)<block_end><def_stmt>test_serialize self# Not used. 
<block_start><pass><block_end><block_end><class_stmt>TestOFPPortStatsReply(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPPortStatsReply """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPPortStatsReply(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x11' 'val':ofproto.OFPT_STATS_REPLY}<line_sep>msg_len_val=ofproto.OFP_STATS_MSG_SIZE+ofproto.OFP_PORT_STATS_SIZE<line_sep>msg_len={'buf':'\x00\x74' 'val':msg_len_val}<line_sep>xid={'buf':'\xc2\xaf\x3d\xff' 'val':3266264575}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']<line_sep># OFP_STATS_MSG_PACK_STR # '!HH'...type_, flags type_={'buf':'\x00\x04' 'val':ofproto.OFPST_PORT}<line_sep>flags={'buf':'\xda\xde' 'val':56030}<line_sep>buf<augadd>type_['buf']+flags['buf']<line_sep># stats_type_cls = OFPPortStats # OFP_PORT_STATS_PACK_STR # '!H6xQQQQQQQQQQQQ'... port_no, zfill, rx_packets, tx_packets, # rx_bytes, tx_bytes, rx_dropped, tx_dropped, # rx_errors, tx_errors, rx_frame_err, # rx_over_err, rx_crc_err, collisions port_no={'buf':'\xe7\x6b' 'val':59243}<line_sep>zfill='\x00'<times>6<line_sep>rx_packets={'buf':'\x53\x44\x36\x61\xc4\x86\xc0\x37' 'val':5999980397101236279}<line_sep>tx_packets={'buf':'\x27\xa4\x41\xd7\xd4\x53\x9e\x42' 'val':2856480458895760962}<line_sep>rx_bytes={'buf':'\x55\xa1\x38\x60\x43\x97\x0d\x89' 'val':6170274950576278921}<line_sep>tx_bytes={'buf':'\x77\xe1\xd5\x63\x18\xae\x63\xaa' 'val':8638420181865882538}<line_sep>rx_dropped={'buf':'\x60\xe6\x20\x01\x24\xda\x4e\x5a' 'val':6982303461569875546}<line_sep>tx_dropped={'buf':'\x09\x2d\x5d\x71\x71\xb6\x8e\xc7' 'val':661287462113808071}<line_sep>rx_errors={'buf':'\x2f\x7e\x35\xb3\x66\x3c\x19\x0d' 'val':3422231811478788365}<line_sep>tx_errors={'buf':'\x57\x32\x08\x2f\x88\x32\x40\x6b' 'val':6283093430376743019}<line_sep>rx_frame_err={'buf':'\x0c\x28\x6f\xad\xce\x66\x6e\x8b' 'val':876072919806406283}<line_sep>rx_over_err={'buf':'\x5a\x90\x8f\x9b\xfc\x82\x2e\xa0' 'val':6525873760178941600}<line_sep>rx_crc_err={'buf':'\x73\x3a\x71\x17\xd6\x74\x69\x47' 'val':8303073210207070535}<line_sep>collisions={'buf':'\x2f\x52\x0c\x79\x96\x03\x6e\x79' 'val':3409801584220270201}<line_sep>buf<augadd>port_no['buf']+zfill+rx_packets['buf']+tx_packets['buf']+rx_bytes['buf']+tx_bytes['buf']+rx_dropped['buf']+tx_dropped['buf']+rx_errors['buf']+tx_errors['buf']+rx_frame_err['buf']+rx_over_err['buf']+rx_crc_err['buf']+collisions['buf']<line_sep>res=OFPPortStatsReply.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(type_['val'] res.type)<line_sep>eq_(flags['val'] res.flags)<line_sep># body body=res.body[0]<line_sep>eq_(port_no['val'] body.port_no)<line_sep>eq_(rx_packets['val'] body.rx_packets)<line_sep>eq_(tx_packets['val'] body.tx_packets)<line_sep>eq_(rx_bytes['val'] body.rx_bytes)<line_sep>eq_(tx_bytes['val'] body.tx_bytes)<line_sep>eq_(rx_dropped['val'] body.rx_dropped)<line_sep>eq_(tx_dropped['val'] body.tx_dropped)<line_sep>eq_(rx_errors['val'] body.rx_errors)<line_sep>eq_(tx_errors['val'] body.tx_errors)<line_sep>eq_(rx_frame_err['val'] 
body.rx_frame_err)<line_sep>eq_(rx_over_err['val'] body.rx_over_err)<line_sep>eq_(rx_crc_err['val'] body.rx_crc_err)<line_sep>eq_(collisions['val'] body.collisions)<block_end><def_stmt>test_serialize self# Not used. <block_start><pass><block_end><block_end><class_stmt>TestOFPQueueStatsReply(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPQueueStatsReply """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPQueueStatsReply(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x11' 'val':ofproto.OFPT_STATS_REPLY}<line_sep>msg_len_val=ofproto.OFP_STATS_MSG_SIZE+ofproto.OFP_QUEUE_STATS_SIZE<line_sep>msg_len={'buf':'\x00\x2c' 'val':msg_len_val}<line_sep>xid={'buf':'\x19\xfc\x28\x6c' 'val':435955820}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']<line_sep># OFP_STATS_MSG_PACK_STR # '!HH'...type_, flags type_={'buf':'\x00\x05' 'val':ofproto.OFPST_QUEUE}<line_sep>flags={'buf':'\x3b\x2b' 'val':15147}<line_sep>buf<augadd>type_['buf']+flags['buf']<line_sep># stats_type_cls = OFPQueueStats # OFP_QUEUE_STATS_PACK_STR # '!H2xIQQQ'...port_no, queue_id, tx_bytes, tx_packets, tx_errors port_no={'buf':'\xe7\x6b' 'val':59243}<line_sep>zfill='\x00'<times>2<line_sep>queue_id={'buf':'\x2a\xa8\x7f\x32' 'val':715685682}<line_sep>tx_bytes={'buf':'\x77\xe1\xd5\x63\x18\xae\x63\xaa' 'val':8638420181865882538}<line_sep>tx_packets={'buf':'\x27\xa4\x41\xd7\xd4\x53\x9e\x42' 'val':2856480458895760962}<line_sep>tx_errors={'buf':'\x57\x32\x08\x2f\x88\x32\x40\x6b' 'val':6283093430376743019}<line_sep>buf<augadd>port_no['buf']+zfill+queue_id['buf']+tx_bytes['buf']+tx_packets['buf']+tx_errors['buf']<line_sep>res=OFPQueueStatsReply.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(type_['val'] res.type)<line_sep>eq_(flags['val'] res.flags)<line_sep># body body=res.body[0]<line_sep>eq_(port_no['val'] body.port_no)<line_sep>eq_(queue_id['val'] body.queue_id)<line_sep>eq_(tx_bytes['val'] body.tx_bytes)<line_sep>eq_(tx_packets['val'] body.tx_packets)<line_sep>eq_(tx_errors['val'] body.tx_errors)<block_end><def_stmt>test_serialize self# Not used.
<block_start><pass><block_end><block_end><class_stmt>TestOFPVendorStatsReply(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPVendorStatsReply """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPVendorStatsReply(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self<block_start>version={'buf':'\x01' 'val':ofproto.OFP_VERSION}<line_sep>msg_type={'buf':'\x11' 'val':ofproto.OFPT_STATS_REPLY}<line_sep># ofproto.OFP_STATS_MSG_SIZE + len(specific_data) msg_len={'buf':'\x00\x18' 'val':ofproto.OFP_STATS_MSG_SIZE+12}<line_sep>xid={'buf':'\x94\xc4\xd2\xcd' 'val':2495926989}<line_sep>buf=version['buf']+msg_type['buf']+msg_len['buf']+xid['buf']<line_sep># OFP_STATS_MSG_PACK_STR # '!HH'...type_, flags type_={'buf':'\xff\xff' 'val':ofproto.OFPST_VENDOR}<line_sep>flags={'buf':'\x30\xd9' 'val':12505}<line_sep>buf<augadd>type_['buf']+flags['buf']<line_sep># stats_type_cls = OFPVendorStats specific_data='specific_data'<line_sep>buf<augadd>specific_data<line_sep>res=OFPVendorStatsReply.parser(object version['val'] msg_type['val'] msg_len['val'] xid['val'] buf)<line_sep>eq_(version['val'] res.version)<line_sep>eq_(msg_type['val'] res.msg_type)<line_sep>eq_(msg_len['val'] res.msg_len)<line_sep>eq_(xid['val'] res.xid)<line_sep>eq_(type_['val'] res.type)<line_sep>eq_(flags['val'] res.flags)<line_sep># body body=res.body[0]<line_sep>eq_(specific_data body)<block_end><def_stmt>test_serialize self# Not used. <block_start><pass><block_end><block_end><class_stmt>TestOFPFeaturesRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPFeaturesRequest """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPFeaturesRequest(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_FEATURES_REQUEST self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>fmt=ofproto.OFP_HEADER_PACK_STR<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_FEATURES_REQUEST res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<block_end><block_end><class_stmt>TestOFPGetConfigRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPGetConfigRequest """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPGetConfigRequest(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self# Not used. 
<block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_GET_CONFIG_REQUEST self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>fmt=ofproto.OFP_HEADER_PACK_STR<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_GET_CONFIG_REQUEST res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<block_end><block_end><class_stmt>TestOFPSetConfig(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPSetConfig """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end># OFP_SWITCH_CONFIG_PACK_STR # '!HH'...flags, miss_send_len flags={'buf':'\xa0\xe2' 'val':41186}<line_sep>miss_send_len={'buf':'\x36\x0e' 'val':13838}<line_sep>c=OFPSetConfig(Datapath flags['val'] miss_send_len['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.flags['val'] self.c.flags)<line_sep>eq_(self.miss_send_len['val'] self.c.miss_send_len)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_SET_CONFIG self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.OFP_SWITCH_CONFIG_PACK_STR.replace('!' '')<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_SET_CONFIG res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(self.flags['val'] res[4])<line_sep>eq_(self.miss_send_len['val'] res[5])<block_end><block_end><class_stmt>TestOFPPacketOut(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPPacketOut """<line_sep>port=0x2ae0<line_sep>actions=[OFPActionOutput(port max_len=0)]<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>_get_obj self buffer_id in_port data=<none><block_start><class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPPacketOut(Datapath buffer_id in_port self.actions data)<line_sep><return>c<block_end><def_stmt>test_init self<block_start>buffer_id=0xffffffff<line_sep>in_port=0x40455<line_sep>data='Message'<line_sep>c=self._get_obj(buffer_id in_port data)<line_sep>eq_(buffer_id c.buffer_id)<line_sep>eq_(in_port c.in_port)<line_sep>eq_(data c.data)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>buffer_id=0xffffffff<line_sep>in_port=0x9e07<line_sep>data='Message'<line_sep>c=self._get_obj(buffer_id in_port data)<line_sep>c.serialize()<line_sep>eq_(ofproto.OFP_VERSION c.version)<line_sep>eq_(ofproto.OFPT_PACKET_OUT c.msg_type)<line_sep>eq_(0 c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.OFP_PACKET_OUT_PACK_STR.replace('!' '')+ofproto.OFP_ACTION_OUTPUT_PACK_STR.replace('!' 
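# Request messages are built and sent by the controller, so only serialize() is tested for them.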
'')+str(len(data))+'s'<line_sep>res=struct.unpack(fmt str(c.buf))<line_sep># OFP_HEADER_PACK_STR eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_PACKET_OUT res[1])<line_sep>eq_(len(c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep># OFP_PACKET_OUT_PACK_STR eq_(buffer_id res[4])<line_sep>eq_(in_port res[5])<line_sep>eq_(ofproto.OFP_ACTION_OUTPUT_SIZE res[6])<line_sep># OFP_ACTION_OUTPUT_PACK_STR eq_(ofproto.OFPAT_OUTPUT res[7])<line_sep>eq_(ofproto.OFP_ACTION_OUTPUT_SIZE res[8])<line_sep>eq_(self.port res[9])<line_sep>eq_(0 res[10])<line_sep># data eq_(data res[11])<block_end>@raises(AssertionError)<def_stmt>test_serialize_check_buffer_id self<block_start>buffer_id=0xffffff00<line_sep>in_port=0xaa92<line_sep>data='Message'<line_sep>c=self._get_obj(buffer_id in_port data)<line_sep>c.serialize()<block_end><block_end><class_stmt>TestOFPFlowMod(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPFlowMod """<line_sep># OFP_FLOW_MOD_PACK_STR0 # '!QHHHHIHH'...cookie, command, idle_timeout, hard_timeout, # priority, buffer_id, out_port, flags cookie={'buf':'\x1d\x86\xce\x6e\x8d\xc0\xbe\xa8' 'val':2127614848199081640}<line_sep>command={'buf':'\xe1\x55' 'val':57685}<line_sep>idle_timeout={'buf':'\xf3\x6d' 'val':62317}<line_sep>hard_timeout={'buf':'\x1c\xc5' 'val':7365}<line_sep>priority={'buf':'\x9c\xe3' 'val':40163}<line_sep>buffer_id={'buf':'\xf0\xa1\x80\x33' 'val':4037115955}<line_sep>out_port={'buf':'\xfe\x0d' 'val':65037}<line_sep>flags={'buf':'\x00\x87' 'val':135}<line_sep># OFP_MATCH_PACK_STR # '!IH6s6sHBxHBB2xIIHH'...wildcards, in_port, dl_src, dl_dst, dl_vlan, # dl_vlan_pcp, dl_type, nw_tos, nw_proto, # nw_src, nw_dst, tp_src, tp_dst wildcards={'buf':'\xd2\x71\x25\x23' 'val':3530630435}<line_sep>in_port={'buf':'\x37\x8b' 'val':14219}<line_sep>dl_src='\xdf\xcf\xe1\x5d\xcf\xc0'<line_sep>dl_dst='\x76\xb3\xfb\xc6\x21\x2f'<line_sep>dl_vlan={'buf':'\xc1\xf9' 'val':49657}<line_sep>dl_vlan_pcp={'buf':'\x79' 'val':121}<line_sep>zfill0='\x00'<line_sep>dl_type={'buf':'\xa6\x9e' 'val':42654}<line_sep>nw_tos={'buf':'\xde' 'val':222}<line_sep>nw_proto={'buf':'\xe5' 'val':229}<line_sep>zfil11='\x00'<times>2<line_sep>nw_src={'buf':'\x1b\x6d\x8d\x4b' 'val':460164427}<line_sep>nw_dst={'buf':'\xab\x25\xe1\x20' 'val':2871386400}<line_sep>tp_src={'buf':'\xd5\xc3' 'val':54723}<line_sep>tp_dst={'buf':'\x78\xb9' 'val':30905}<line_sep>match=OFPMatch(wildcards['val'] in_port['val'] dl_src dl_dst dl_vlan['val'] dl_vlan_pcp['val'] dl_type['val'] nw_tos['val'] nw_proto['val'] nw_src['val'] nw_dst['val'] tp_src['val'] tp_dst['val'])<line_sep>port=0x2ae0<line_sep>actions=[OFPActionOutput(port max_len=1000)]<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>_get_obj self actions=<none><block_start><class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPFlowMod(Datapath self.match self.cookie['val'] self.command['val'] self.idle_timeout['val'] self.hard_timeout['val'] self.priority['val'] self.buffer_id['val'] self.out_port['val'] self.flags['val'] actions)<line_sep><return>c<block_end><def_stmt>test_init self<block_start>c=self._get_obj()<line_sep>eq_(self.cookie['val'] c.cookie)<line_sep>eq_(self.command['val'] c.command)<line_sep>eq_(self.idle_timeout['val'] c.idle_timeout)<line_sep>eq_(self.hard_timeout['val'] c.hard_timeout)<line_sep>eq_(self.priority['val'] c.priority)<line_sep>eq_(self.buffer_id['val'] c.buffer_id)<line_sep>eq_(self.out_port['val'] 
c.out_port)<line_sep>eq_(self.flags['val'] c.flags)<block_end><def_stmt>test_init_actions self<block_start>c=self._get_obj(self.actions)<line_sep>action=c.actions[0]<line_sep>eq_(self.port action.port)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>c=self._get_obj(self.actions)<line_sep>c.serialize()<line_sep>eq_(ofproto.OFP_VERSION c.version)<line_sep>eq_(ofproto.OFPT_FLOW_MOD c.msg_type)<line_sep>eq_(0 c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.OFP_MATCH_PACK_STR.replace('!' '')+ofproto.OFP_FLOW_MOD_PACK_STR0.replace('!' '')+ofproto.OFP_ACTION_OUTPUT_PACK_STR.replace('!' '')<line_sep>res=struct.unpack(fmt str(c.buf))<line_sep># OFP_HEADER_PACK_STR eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_FLOW_MOD res[1])<line_sep>eq_(len(c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep># OFP_MATCH_PACK_STR eq_(self.wildcards['val'] res[4])<line_sep>eq_(self.in_port['val'] res[5])<line_sep>eq_(self.dl_src res[6])<line_sep>eq_(self.dl_dst res[7])<line_sep>eq_(self.dl_vlan['val'] res[8])<line_sep>eq_(self.dl_vlan_pcp['val'] res[9])<line_sep>eq_(self.dl_type['val'] res[10])<line_sep>eq_(self.nw_tos['val'] res[11])<line_sep>eq_(self.nw_proto['val'] res[12])<line_sep>eq_(self.nw_src['val'] res[13])<line_sep>eq_(self.nw_dst['val'] res[14])<line_sep>eq_(self.tp_src['val'] res[15])<line_sep>eq_(self.tp_dst['val'] res[16])<line_sep># OFP_FLOW_MOD_PACK_STR0 eq_(self.cookie['val'] res[17])<line_sep>eq_(self.command['val'] res[18])<line_sep>eq_(self.idle_timeout['val'] res[19])<line_sep>eq_(self.hard_timeout['val'] res[20])<line_sep>eq_(self.priority['val'] res[21])<line_sep>eq_(self.buffer_id['val'] res[22])<line_sep>eq_(self.out_port['val'] res[23])<line_sep>eq_(self.flags['val'] res[24])<line_sep># OFP_ACTION_OUTPUT_PACK_STR eq_(ofproto.OFPAT_OUTPUT res[25])<line_sep>eq_(ofproto.OFP_ACTION_OUTPUT_SIZE res[26])<line_sep>eq_(self.port res[27])<line_sep>eq_(1000 res[28])<block_end><block_end><class_stmt>TestOFPBarrierRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPBarrierRequest """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>c=OFPBarrierRequest(Datapath)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start><pass><block_end><def_stmt>test_parser self# Not used. 
<block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_BARRIER_REQUEST self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>fmt=ofproto.OFP_HEADER_PACK_STR<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_BARRIER_REQUEST res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<block_end><block_end><class_stmt>TestOFPQueueGetConfigRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPQueueGetConfigRequest """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end># OFP_QUEUE_GET_CONFIG_REQUEST_PACK_STR # '!H2x'...port, zfill port={'buf':'\xa0\xe2' 'val':41186}<line_sep>zfill='\x00'<times>2<line_sep>c=OFPQueueGetConfigRequest(Datapath port['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(self.port['val'] self.c.port)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_QUEUE_GET_CONFIG_REQUEST self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>a=ofproto.OFP_HEADER_PACK_STR.replace('!' '')<line_sep>b=ofproto.OFP_QUEUE_GET_CONFIG_REQUEST_PACK_STR.replace('!' '')<line_sep>fmt='!'+a+b<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep>eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_QUEUE_GET_CONFIG_REQUEST res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep>eq_(self.port['val'] res[4])<block_end><block_end><class_stmt>TestOFPDescStatsRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPDescStatsRequest """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>flags={'buf':'\x00\x00' 'val':0}<line_sep>c=OFPDescStatsRequest(Datapath flags['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(ofproto.OFPST_DESC self.c.type)<line_sep>eq_(self.flags['val'] self.c.flags)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_STATS_REQUEST self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.OFP_STATS_MSG_PACK_STR.replace('!' 
'')<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep># OFP_HEADER_PACK_STR eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_STATS_REQUEST res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep># OFP_STATS_MSG_PACK_STR eq_(ofproto.OFPST_DESC res[4])<line_sep>eq_(self.flags['val'] res[5])<block_end><block_end><class_stmt>TestOFPFlowStatsRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPFlowStatsRequest """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>flags={'buf':'\x00\x00' 'val':0}<line_sep># OFP_MATCH_PACK_STR # '!IH6s6sHBxHBB2xIIHH'...wildcards, in_port, dl_src, dl_dst, dl_vlan, # dl_vlan_pcp, dl_type, nw_tos, nw_proto, # nw_src, nw_dst, tp_src, tp_dst wildcards={'buf':'\xd2\x71\x25\x23' 'val':3530630435}<line_sep>in_port={'buf':'\x37\x8b' 'val':14219}<line_sep>dl_src='\x58\xd0\x8a\x69\xa4\xfc'<line_sep>dl_dst='\xb6\xe2\xef\xb1\xa6\x2d'<line_sep>dl_vlan={'buf':'\xc1\xf9' 'val':49657}<line_sep>dl_vlan_pcp={'buf':'\x79' 'val':121}<line_sep>zfill0='\x00'<line_sep>dl_type={'buf':'\xa6\x9e' 'val':42654}<line_sep>nw_tos={'buf':'\xde' 'val':222}<line_sep>nw_proto={'buf':'\xe5' 'val':229}<line_sep>zfil11='\x00'<times>2<line_sep>nw_src={'buf':'\x1b\x6d\x8d\x4b' 'val':460164427}<line_sep>nw_dst={'buf':'\xab\x25\xe1\x20' 'val':2871386400}<line_sep>tp_src={'buf':'\xd5\xc3' 'val':54723}<line_sep>tp_dst={'buf':'\x78\xb9' 'val':30905}<line_sep>match=OFPMatch(wildcards['val'] in_port['val'] dl_src dl_dst dl_vlan['val'] dl_vlan_pcp['val'] dl_type['val'] nw_tos['val'] nw_proto['val'] nw_src['val'] nw_dst['val'] tp_src['val'] tp_dst['val'])<line_sep># OFP_FLOW_STATS_REQUEST_ID_PORT_STR # '!BxH'...table_id, zfill, out_port table_id={'buf':'\xd1' 'val':209}<line_sep>zfill='\x00'<times>1<line_sep>out_port={'buf':'\xe4\x9a' 'val':58522}<line_sep>c=OFPFlowStatsRequest(Datapath flags['val'] match table_id['val'] out_port['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(ofproto.OFPST_FLOW self.c.type)<line_sep>eq_(self.flags['val'] self.c.flags)<line_sep>eq_(self.table_id['val'] self.c.table_id)<line_sep>eq_(self.out_port['val'] self.c.out_port)<line_sep># match match=self.c.match<line_sep>eq_(self.match.__hash__() match.__hash__())<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_STATS_REQUEST self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.OFP_STATS_MSG_PACK_STR.replace('!' '')+ofproto.OFP_MATCH_PACK_STR.replace('!' '')+ofproto.OFP_FLOW_STATS_REQUEST_ID_PORT_STR.replace('!' 
'')<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep># OFP_HEADER_PACK_STR eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_STATS_REQUEST res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep># OFP_STATS_MSG_PACK_STR eq_(ofproto.OFPST_FLOW res[4])<line_sep>eq_(self.flags['val'] res[5])<line_sep># OFP_MATCH_PACK_STR eq_(self.wildcards['val'] res[6])<line_sep>eq_(self.in_port['val'] res[7])<line_sep>eq_(self.dl_src res[8])<line_sep>eq_(self.dl_dst res[9])<line_sep>eq_(self.dl_vlan['val'] res[10])<line_sep>eq_(self.dl_vlan_pcp['val'] res[11])<line_sep>eq_(self.dl_type['val'] res[12])<line_sep>eq_(self.nw_tos['val'] res[13])<line_sep>eq_(self.nw_proto['val'] res[14])<line_sep>eq_(self.nw_src['val'] res[15])<line_sep>eq_(self.nw_dst['val'] res[16])<line_sep>eq_(self.tp_src['val'] res[17])<line_sep>eq_(self.tp_dst['val'] res[18])<line_sep># OFP_FLOW_STATS_REQUEST_ID_PORT_STR eq_(self.table_id['val'] res[19])<line_sep>eq_(self.out_port['val'] res[20])<block_end><block_end><class_stmt>TestOFPAggregateStatsRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPAggregateStatsRequest """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>flags={'buf':'\x00\x00' 'val':0}<line_sep># OFP_MATCH_PACK_STR # '!IH6s6sHBxHBB2xIIHH'...wildcards, in_port, dl_src, dl_dst, dl_vlan, # dl_vlan_pcp, dl_type, nw_tos, nw_proto, # nw_src, nw_dst, tp_src, tp_dst wildcards={'buf':'\xea\x66\x4a\xd4' 'val':3932572372}<line_sep>in_port={'buf':'\x64\xac' 'val':25772}<line_sep>dl_src='\x90\x13\x60\x5e\x20\x4d'<line_sep>dl_dst='\xb5\x5d\x14\x5e\xb9\x22'<line_sep>dl_vlan={'buf':'\x8b\xeb' 'val':35819}<line_sep>dl_vlan_pcp={'buf':'\xe8' 'val':232}<line_sep>zfill0='\x00'<line_sep>dl_type={'buf':'\62\xc9' 'val':25289}<line_sep>nw_tos={'buf':'\xb5' 'val':181}<line_sep>nw_proto={'buf':'\xc4' 'val':196}<line_sep>zfil11='\x00'<times>2<line_sep>nw_src={'buf':'\xb7\xd1\xb7\xef' 'val':3083974639}<line_sep>nw_dst={'buf':'\x7c\xc6\x18\x15' 'val':2093357077}<line_sep>tp_src={'buf':'\x26\x9a' 'val':9882}<line_sep>tp_dst={'buf':'\x7a\x89' 'val':31369}<line_sep>match=OFPMatch(wildcards['val'] in_port['val'] dl_src dl_dst dl_vlan['val'] dl_vlan_pcp['val'] dl_type['val'] nw_tos['val'] nw_proto['val'] nw_src['val'] nw_dst['val'] tp_src['val'] tp_dst['val'])<line_sep># OFP_FLOW_STATS_REQUEST_ID_PORT_STR # '!BxH'...table_id, zfill, out_port table_id={'buf':'\xd1' 'val':209}<line_sep>zfill='\x00'<times>1<line_sep>out_port={'buf':'\xb5\xe8' 'val':46568}<line_sep>c=OFPAggregateStatsRequest(Datapath flags['val'] match table_id['val'] out_port['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(ofproto.OFPST_AGGREGATE self.c.type)<line_sep>eq_(self.flags['val'] self.c.flags)<line_sep>eq_(self.table_id['val'] self.c.table_id)<line_sep>eq_(self.out_port['val'] self.c.out_port)<line_sep># match match=self.c.match<line_sep>eq_(self.match.__hash__() match.__hash__())<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_STATS_REQUEST self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.OFP_STATS_MSG_PACK_STR.replace('!' '')+ofproto.OFP_MATCH_PACK_STR.replace('!' 
'')+ofproto.OFP_FLOW_STATS_REQUEST_ID_PORT_STR.replace('!' '')<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep># OFP_HEADER_PACK_STR eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_STATS_REQUEST res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep># OFP_STATS_MSG_PACK_STR eq_(ofproto.OFPST_AGGREGATE res[4])<line_sep>eq_(self.flags['val'] res[5])<line_sep># OFP_MATCH_PACK_STR eq_(self.wildcards['val'] res[6])<line_sep>eq_(self.in_port['val'] res[7])<line_sep>eq_(self.dl_src res[8])<line_sep>eq_(self.dl_dst res[9])<line_sep>eq_(self.dl_vlan['val'] res[10])<line_sep>eq_(self.dl_vlan_pcp['val'] res[11])<line_sep>eq_(self.dl_type['val'] res[12])<line_sep>eq_(self.nw_tos['val'] res[13])<line_sep>eq_(self.nw_proto['val'] res[14])<line_sep>eq_(self.nw_src['val'] res[15])<line_sep>eq_(self.nw_dst['val'] res[16])<line_sep>eq_(self.tp_src['val'] res[17])<line_sep>eq_(self.tp_dst['val'] res[18])<line_sep># OFP_FLOW_STATS_REQUEST_ID_PORT_STR eq_(self.table_id['val'] res[19])<line_sep>eq_(self.out_port['val'] res[20])<block_end><block_end><class_stmt>TestOFPTableStatsRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPTableStatsRequest """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>flags={'buf':'\x00\x00' 'val':0}<line_sep>c=OFPTableStatsRequest(Datapath flags['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(ofproto.OFPST_TABLE self.c.type)<line_sep>eq_(self.flags['val'] self.c.flags)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_STATS_REQUEST self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.OFP_STATS_MSG_PACK_STR.replace('!' '')<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep># OFP_HEADER_PACK_STR eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_STATS_REQUEST res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep># OFP_STATS_MSG_PACK_STR eq_(ofproto.OFPST_TABLE res[4])<line_sep>eq_(self.flags['val'] res[5])<block_end><block_end><class_stmt>TestOFPPortStatsRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPPortStatsRequest """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>flags={'buf':'\x00\x00' 'val':0}<line_sep># OFP_PORT_STATS_REQUEST_PACK_STR # '!H6x'...port_no, zfill port_no={'buf':'\x6d\x27' 'val':27943}<line_sep>c=OFPPortStatsRequest(Datapath flags['val'] port_no['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(ofproto.OFPST_PORT self.c.type)<line_sep>eq_(self.flags['val'] self.c.flags)<line_sep>eq_(self.port_no['val'] self.c.port_no)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_STATS_REQUEST self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.OFP_STATS_MSG_PACK_STR.replace('!' '')+ofproto.OFP_PORT_STATS_REQUEST_PACK_STR.replace('!' 
'')<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep># OFP_HEADER_PACK_STR eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_STATS_REQUEST res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep># OFP_STATS_MSG_PACK_STR eq_(ofproto.OFPST_PORT res[4])<line_sep>eq_(self.flags['val'] res[5])<line_sep># OFP_PORT_STATS_REQUEST_PACK_STR eq_(self.port_no['val'] res[6])<block_end><block_end><class_stmt>TestOFPQueueStatsRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPQueueStatsRequest """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>flags={'buf':'\x00\x00' 'val':0}<line_sep># OFP_QUEUE_STATS_REQUEST_PACK_STR # '!HxxI'...port_no, zfill, zfill, queue_id port_no={'buf':'\x0c\x2d' 'val':3117}<line_sep>queue_id={'buf':'\x1b\xe6\xba\x36' 'val':468105782}<line_sep>c=OFPQueueStatsRequest(Datapath flags['val'] port_no['val'] queue_id['val'])<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(ofproto.OFPST_QUEUE self.c.type)<line_sep>eq_(self.flags['val'] self.c.flags)<line_sep>eq_(self.port_no['val'] self.c.port_no)<line_sep>eq_(self.queue_id['val'] self.c.queue_id)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_STATS_REQUEST self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.OFP_STATS_MSG_PACK_STR.replace('!' '')+ofproto.OFP_QUEUE_STATS_REQUEST_PACK_STR.replace('!' '')<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep># OFP_HEADER_PACK_STR eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_STATS_REQUEST res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep># OFP_STATS_MSG_PACK_STR eq_(ofproto.OFPST_QUEUE res[4])<line_sep>eq_(self.flags['val'] res[5])<line_sep># OFP_QUEUE_STATS_REQUEST_PACK_STR eq_(self.port_no['val'] res[6])<line_sep>eq_(self.queue_id['val'] res[7])<block_end><block_end><class_stmt>TestOFPVendorStatsRequest(unittest.TestCase)<block_start>""" Test case for ofproto_v1_0_parser.OFPVendorStatsRequest """<class_stmt>Datapath(object)<block_start>ofproto=ofproto# copy to class attribute ofproto_parser=ofproto_v1_0_parser<block_end>flags={'buf':'\x00\x00' 'val':0}<line_sep># OFP_VENDOR_STATS_MSG_PACK_STR # '!I'...vendor vendor={'buf':'\xff\xff\xff\xff' 'val':ofproto.OFPAT_VENDOR}<line_sep>specific_data='specific_data'<line_sep>c=OFPVendorStatsRequest(Datapath flags['val'] vendor['val'] specific_data)<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>eq_(ofproto.OFPST_VENDOR self.c.type)<line_sep>eq_(self.flags['val'] self.c.flags)<line_sep>eq_(self.vendor['val'] self.c.vendor)<line_sep>eq_(self.specific_data self.c.specific_data)<block_end><def_stmt>test_parser self# Not used. <block_start><pass><block_end><def_stmt>test_serialize self<block_start>self.c.serialize()<line_sep>eq_(ofproto.OFP_VERSION self.c.version)<line_sep>eq_(ofproto.OFPT_STATS_REQUEST self.c.msg_type)<line_sep>eq_(0 self.c.xid)<line_sep>fmt='!'+ofproto.OFP_HEADER_PACK_STR.replace('!' '')+ofproto.OFP_STATS_MSG_PACK_STR.replace('!' '')+ofproto.OFP_VENDOR_STATS_MSG_PACK_STR.replace('!' 
'')+str(len(self.specific_data))+'s'<line_sep>res=struct.unpack(fmt str(self.c.buf))<line_sep># OFP_HEADER_PACK_STR eq_(ofproto.OFP_VERSION res[0])<line_sep>eq_(ofproto.OFPT_STATS_REQUEST res[1])<line_sep>eq_(len(self.c.buf) res[2])<line_sep>eq_(0 res[3])<line_sep># OFP_STATS_MSG_PACK_STR eq_(ofproto.OFPST_VENDOR res[4])<line_sep>eq_(self.flags['val'] res[5])<line_sep># OFP_VENDOR_STATS_MSG_PACK_STR eq_(self.vendor['val'] res[6])<line_sep># specific_data eq_(self.specific_data res[7])<block_end><block_end>
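# Illustrative aside, not part of the test suite above: every serialize test unpacks the buffer with '!' + OFP_HEADER_PACK_STR, which in ryu's OpenFlow 1.0 definitions is '!BBHI' for (version, msg_type, length, xid); the constants are restated here as assumptions so the common header round-trip can be sketched with struct alone. <import_stmt>struct<line_sep>OFP_HEADER_PACK_STR='!BBHI'# assumed equal to ofproto.OFP_HEADER_PACK_STR buf=struct.pack(OFP_HEADER_PACK_STR 0x01 18 8 0)# version 1, OFPT_BARRIER_REQUEST (assumed 18 in OF1.0), length 8, xid 0 version,msg_type,length,xid=struct.unpack(OFP_HEADER_PACK_STR buf)<line_sep><assert_stmt>(version msg_type length xid)<eq>(0x01 18 8 0)<line_sep>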
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>base64<import_stmt>json<import_stmt>unittest<import_stmt>mock<import_from_stmt>google.appengine.ext ndb<import_from_stmt>dashboard.common testing_common<import_from_stmt>dashboard.common utils<import_from_stmt>dashboard.models graph_data<class_stmt>UtilsTest(testing_common.TestCase)<block_start><def_stmt>setUp self<block_start>super(UtilsTest self).setUp()<line_sep>testing_common.SetIsInternalUser('<EMAIL>' <true>)<line_sep>testing_common.SetIsInternalUser('<EMAIL>' <false>)<block_end><def_stmt>_AssertMatches self test_path pattern<block_start>"""Asserts that a test path matches a pattern with MatchesPattern."""<line_sep>test_key=utils.TestKey(test_path)<line_sep>self.assertTrue(utils.TestMatchesPattern(test_key pattern))<block_end><def_stmt>_AssertDoesntMatch self test_path pattern<block_start>"""Asserts that a test path doesn't match a pattern with MatchesPattern."""<line_sep>test_key=utils.TestKey(test_path)<line_sep>self.assertFalse(utils.TestMatchesPattern(test_key pattern))<block_end><def_stmt>testMatchesPattern_AllWildcards self<block_start>self._AssertMatches('ChromiumPerf/cros-one/dromaeo.top25/Total' '*/*/*/*')<line_sep>self._AssertDoesntMatch('ChromiumPerf/cros-one/dromaeo.top25/Total' '*/*/*')<block_end><def_stmt>testMatchesPattern_SomeWildcards self<block_start>self._AssertMatches('ChromiumPerf/cros-one/dromaeo.top25/Total' 'ChromiumPerf/*/dromaeo.top25/*')<line_sep>self._AssertDoesntMatch('ChromiumPerf/cros-one/dromaeo.top25/Total' 'ChromiumPerf/*/dromaeo.another_page_set/*')<block_end><def_stmt>testMatchesPattern_SomePartialWildcards self<block_start>self._AssertMatches('ChromiumPerf/cros-one/dromaeo.top25/Total' 'ChromiumPerf/cros-*/dromaeo.*/Total')<line_sep>self._AssertDoesntMatch('ChromiumPerf/cros-one/dromaeoXtop25/Total' 'ChromiumPerf/cros-*/dromaeo.*/Total')<line_sep>self._AssertDoesntMatch('ChromiumPerf/cros-one/dromaeo.top25/Total' 'OtherMaster/cros-*/dromaeo.*/Total')<block_end><def_stmt>testMatchesPattern_MorePartialWildcards self# Note that the wildcard matches zero or more characters. <block_start>self._AssertMatches('ChromiumPerf/cros-one/dromaeo.top25/Total' 'Chromium*/cros-one*/*.*/To*al')<line_sep>self._AssertDoesntMatch('ChromiumPerf/cros-one/dromaeo.top25/Total' 'Chromium*/linux-*/*.*/To*al')<block_end><def_stmt>testMatchesPattern_RequiresFullMatchAtEnd self# If there is no wildcard at the beginning or end of the # test path part, then a part will only match if it matches # right up to the beginning or end. 
<block_start>self._AssertDoesntMatch('ChromiumPerf/cros-one/dromaeo.top25/Total' 'ChromiumPerf/cros-one/dromaeo.top25/*Tot')<line_sep>self._AssertDoesntMatch('ChromiumPerf/cros-one/dromaeo.top25/Total' 'ChromiumPerf/cros-one/dromaeo.top25/otal*')<block_end><def_stmt>testMostSpecificMatchingPattern_SpecificVsGeneral self<block_start>test_key=utils.TestKey('M/B/S/Total')<line_sep>result=utils.MostSpecificMatchingPattern(test_key [('*/*/*/*' 1) ('*/*/*/Total' 2) ('*/*/*/Foo' 3)])<line_sep>self.assertEqual(2 result)<block_end><def_stmt>testMostSpecificMatchingPattern_PartialVsGeneral self<block_start>test_key=utils.TestKey('M/B/S/Total')<line_sep>result=utils.MostSpecificMatchingPattern(test_key [('*/*/*/*' 1) ('*/*/*/To*al' 2) ('*/*/*/Foo' 3)])<line_sep>self.assertEqual(2 result)<block_end><def_stmt>testMostSpecificMatchingPattern_2ndLevel self<block_start>test_key=utils.TestKey('M/B/S/Total')<line_sep>result=utils.MostSpecificMatchingPattern(test_key [('*/*/*/*' 1) ('*/*/S/*' 2) ('*/*/*/Foo' 3)])<line_sep>self.assertEqual(2 result)<block_end><def_stmt>testMostSpecificMatchingPattern_TopLevelSpecificOverLowerSpecific self<block_start>test_key=utils.TestKey('M/B/S/Total')<line_sep>result=utils.MostSpecificMatchingPattern(test_key [('*/*/S/*' 1) ('*/*/*/Total' 2) ('*/*/*/Foo' 3)])<line_sep>self.assertEqual(2 result)<block_end><def_stmt>testMostSpecificMatchingPattern_TopLevelPartialOverLowerSpecific self<block_start>test_key=utils.TestKey('M/B/S/Total')<line_sep>result=utils.MostSpecificMatchingPattern(test_key [('*/*/S/*' 1) ('*/*/*/To*al' 2) ('*/*/*/Foo' 3)])<line_sep>self.assertEqual(2 result)<block_end><def_stmt>_PutEntitiesAllExternal self<block_start>"""Puts entities (none internal-only) and returns the keys."""<line_sep>master=graph_data.Master(id='M').put()<line_sep>graph_data.Bot(parent=master id='b').put()<line_sep>keys=[graph_data.TestMetadata(id='M/b/a' internal_only=<false>).put() graph_data.TestMetadata(id='M/b/b' internal_only=<false>).put() graph_data.TestMetadata(id='M/b/c' internal_only=<false>).put() graph_data.TestMetadata(id='M/b/d' internal_only=<false>).put() ]<line_sep><return>keys<block_end><def_stmt>_PutEntitiesHalfInternal self<block_start>"""Puts entities (half internal-only) and returns the keys."""<line_sep>master=graph_data.Master(id='M').put()<line_sep>graph_data.Bot(parent=master id='b').put()<line_sep>keys=[graph_data.TestMetadata(id='M/b/ax' internal_only=<true>).put() graph_data.TestMetadata(id='M/b/a' internal_only=<false>).put() graph_data.TestMetadata(id='M/b/b' internal_only=<false>).put() graph_data.TestMetadata(id='M/b/bx' internal_only=<true>).put() graph_data.TestMetadata(id='M/b/c' internal_only=<false>).put() graph_data.TestMetadata(id='M/b/cx' internal_only=<true>).put() graph_data.TestMetadata(id='M/b/d' internal_only=<false>).put() graph_data.TestMetadata(id='M/b/dx' internal_only=<true>).put() ]<line_sep><return>keys<block_end><def_stmt>testGetMulti_ExternalUser_ReturnsSomeEntities self<block_start>keys=self._PutEntitiesHalfInternal()<line_sep>self.SetCurrentUser('<EMAIL>')<line_sep>self.assertEqual(len(keys)/2 len(utils.GetMulti(keys)))<block_end><def_stmt>testGetMulti_InternalUser_ReturnsAllEntities self<block_start>keys=self._PutEntitiesHalfInternal()<line_sep>self.SetCurrentUser('<EMAIL>')<line_sep>self.assertEqual(len(keys) len(utils.GetMulti(keys)))<block_end><def_stmt>testGetMulti_AllExternalEntities_ReturnsAllEntities 
self<block_start>keys=self._PutEntitiesAllExternal()<line_sep>self.SetCurrentUser('<EMAIL>')<line_sep>self.assertEqual(len(keys) len(utils.GetMulti(keys)))<block_end><def_stmt>testTestPath_Test self<block_start>key=ndb.Key('Master' 'm' 'Bot' 'b' 'Test' 'suite' 'Test' 'metric')<line_sep>self.assertEqual('m/b/suite/metric' utils.TestPath(key))<block_end><def_stmt>testTestPath_TestMetadata self<block_start>key=ndb.Key('TestMetadata' 'm/b/suite/metric')<line_sep>self.assertEqual('m/b/suite/metric' utils.TestPath(key))<block_end><def_stmt>testTestPath_Container self<block_start>key=ndb.Key('TestContainer' 'm/b/suite/metric')<line_sep>self.assertEqual('m/b/suite/metric' utils.TestPath(key))<block_end><def_stmt>testTestMetadataKey_None self<block_start>key=utils.TestMetadataKey(<none>)<line_sep>self.assertIsNone(key)<block_end><def_stmt>testTestMetadataKey_Test self<block_start>key=utils.TestMetadataKey(ndb.Key('Master' 'm' 'Bot' 'b' 'Test' 'suite' 'Test' 'metric'))<line_sep>self.assertEqual('TestMetadata' key.kind())<line_sep>self.assertEqual('m/b/suite/metric' key.id())<line_sep>self.assertEqual(('TestMetadata' 'm/b/suite/metric') key.flat())<block_end><def_stmt>testTestMetadataKey_TestMetadata self<block_start>original_key=ndb.Key('TestMetadata' 'm/b/suite/metric')<line_sep>key=utils.TestMetadataKey(original_key)<line_sep>self.assertEqual(original_key key)<block_end><def_stmt>testTestMetadataKey_String self<block_start>key=utils.TestMetadataKey('m/b/suite/metric/page')<line_sep>self.assertEqual('TestMetadata' key.kind())<line_sep>self.assertEqual('m/b/suite/metric/page' key.id())<line_sep>self.assertEqual(('TestMetadata' 'm/b/suite/metric/page') key.flat())<block_end><def_stmt>testOldStyleTestKey_None self<block_start>key=utils.OldStyleTestKey(<none>)<line_sep>self.assertIsNone(key)<block_end><def_stmt>testOldStyleTestKey_Test self<block_start>original_key=ndb.Key('Master' 'm' 'Bot' 'b' 'Test' 'suite' 'Test' 'metric')<line_sep>key=utils.OldStyleTestKey(original_key)<line_sep>self.assertEqual(original_key key)<block_end><def_stmt>testOldStyleTestKey_TestMetadata self<block_start>key=utils.OldStyleTestKey(ndb.Key('TestMetadata' 'm/b/suite/metric'))<line_sep>self.assertEqual('Test' key.kind())<line_sep>self.assertEqual('metric' key.id())<line_sep>self.assertEqual(('Master' 'm' 'Bot' 'b' 'Test' 'suite' 'Test' 'metric') key.flat())<block_end><def_stmt>testOldStyleTestKey_String self<block_start>key=utils.OldStyleTestKey('m/b/suite/metric')<line_sep>self.assertEqual('Test' key.kind())<line_sep>self.assertEqual('metric' key.id())<line_sep>self.assertEqual(('Master' 'm' 'Bot' 'b' 'Test' 'suite' 'Test' 'metric') key.flat())<block_end><def_stmt>testTestSuiteName_Basic self<block_start>key=utils.TestKey('Master/bot/suite-foo/sub/x/y/z')<line_sep>self.assertEqual('suite-foo' utils.TestSuiteName(key))<block_end><def_stmt>testMinimumRange_Empty_ReturnsNone self<block_start>self.assertIsNone(utils.MinimumRange([]))<block_end><def_stmt>testMinimumRange_NotOverlapping_ReturnsNone self<block_start>self.assertIsNone(utils.MinimumRange([(5 10) (15 20)]))<block_end><def_stmt>testMinimumRange_OneRange_ReturnsSameRange self<block_start>self.assertEqual((5 10) utils.MinimumRange([(5 10)]))<block_end><def_stmt>testMinimumRange_OverlapsForOneNumber_ReturnsRangeWithOneNumber self<block_start>self.assertEqual((5 5) utils.MinimumRange([(2 5) (5 10)]))<block_end><def_stmt>testMinimumRange_MoreThanTwoRanges_ReturnsIntersection self<block_start>self.assertEqual((6 14) utils.MinimumRange([(3 20) (5 15) (6 25) (3 
14)]))<block_end><def_stmt>testValidate_StringNotInOptionList_Fails self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>utils.Validate(['completed' 'pending' 'failed'] 'running')<block_end><block_end><def_stmt>testValidate_InvalidType_Fails self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>utils.Validate(int 'a string')<block_end><block_end><def_stmt>testValidate_MissingProperty_Fails self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>utils.Validate({'status':str 'try_job_id':int 'required_property':int} {'status':'completed' 'try_job_id':1234})<block_end><block_end><def_stmt>testValidate_InvalidTypeInDict_Fails self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>utils.Validate({'status':int 'try_job_id':int} {'status':'completed' 'try_job_id':1234})<block_end><block_end><def_stmt>testValidate_StringNotInNestedOptionList_Fails self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>utils.Validate({'values':{'nested_values':['orange' 'banana']}} {'values':{'nested_values':'apple'}})<block_end><block_end><def_stmt>testValidate_MissingPropertyInNestedDict_Fails self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>utils.Validate({'values':{'nested_values':['orange' 'banana']}} {'values':{}})<block_end><block_end><def_stmt>testValidate_ExpectedValueIsNone_Passes self<block_start>utils.Validate(<none> 'running')<block_end><def_stmt>testValidate_StringInOptionList_Passes self<block_start>utils.Validate(str 'a string')<block_end><def_stmt>testValidate_HasExpectedProperties_Passes self<block_start>utils.Validate({'status':str 'try_job_id':int} {'status':'completed' 'try_job_id':1234})<block_end><def_stmt>testValidate_StringInNestedOptionList_Passes self<block_start>utils.Validate({'values':{'nested_values':['orange' 'banana']}} {'values':{'nested_values':'orange'}})<block_end><def_stmt>testValidate_TypeConversion_Passes self<block_start>utils.Validate([1] '1')<block_end><def_stmt>testGetBuildDetailsFromStdioLink_InvalidLink self<block_start>base_url,master,bot,number,step=utils.GetBuildDetailsFromStdioLink('[Buildbot stdio](http://notquite/builders/whatever/234)')<line_sep>self.assertIsNone(base_url)<line_sep>self.assertIsNone(master)<line_sep>self.assertIsNone(bot)<line_sep>self.assertIsNone(number)<line_sep>self.assertIsNone(step)<block_end><def_stmt>testGetBuildDetailsFromStdioLink self<block_start>base_url,master,bot,number,step=utils.GetBuildDetailsFromStdioLink(('[Buildbot stdio](https://build.chromium.org/p/chromium.perf/builders/'<concat>'Android%20One%20Perf%20%282%29/builds/5365/steps/'<concat>'blink_style.top_25/logs/stdio)'))<line_sep>self.assertEqual('https://build.chromium.org/p/chromium.perf/builders/' base_url)<line_sep>self.assertEqual('chromium.perf' master)<line_sep>self.assertEqual('Android One Perf (2)' bot)<line_sep>self.assertEqual('5365' number)<line_sep>self.assertEqual('blink_style.top_25' step)<block_end><def_stmt>testGetBuildDetailsFromStdioLink_DifferentBaseUrl self<block_start>base_url,master,bot,number,step=utils.GetBuildDetailsFromStdioLink(('[Buildbot stdio]('<concat>'https://uberchromegw.corp.google.com/i/new.master/builders/Builder/'<concat>'builds/3486/steps/new_test/logs/stdio)'))<line_sep>self.assertEqual('https://uberchromegw.corp.google.com/i/new.master/builders/' base_url)<line_sep>self.assertEqual('new.master' master)<line_sep>self.assertEqual('Builder' bot)<line_sep>self.assertEqual('3486' number)<line_sep>self.assertEqual('new_test' 
step)<block_end><def_stmt>testGetBuildbotStatusPageUriFromStdioLink self<block_start>buildbot_status_page=utils.GetBuildbotStatusPageUriFromStdioLink(('[Buildbot stdio](https://build.chromium.org/p/chromium.perf/builders/'<concat>'Android%20One%20Perf%20%282%29/builds/5365/steps/'<concat>'blink_style.top_25/logs/stdio)'))<line_sep>self.assertEqual(('https://build.chromium.org/p/chromium.perf/builders/'<concat>'Android%20One%20Perf%20%282%29/builds/5365') buildbot_status_page)<block_end><def_stmt>testGetLogdogLogUriFromStdioLink self<block_start>logdog_uri=utils.GetLogdogLogUriFromStdioLink(('[Buildbot stdio](https://build.chromium.org/p/chromium.perf/builders/'<concat>'Android%20One%20Perf%20%282%29/builds/5365/steps/'<concat>'blink_style.top_25/logs/stdio)'))<line_sep>self.assertEqual(('https://luci-logdog.appspot.com/v/?s='<concat>'chrome%2Fbb%2Fchromium.perf%2FAndroid_One_Perf__2_%2F5365%2F%2B%2F'<concat>'recipes%2Fsteps%2Fblink_style.top_25%2F0%2Fstdout') logdog_uri)<block_end>@mock.patch.object(utils 'ServiceAccountHttp' mock.MagicMock())@mock.patch('common.utils.discovery.build')<def_stmt>testIsGroupMember_PositiveCase self mock_discovery_build<block_start>mock_request=mock.MagicMock()<line_sep>mock_request.execute=mock.MagicMock(return_value={'is_member':<true>})<line_sep>mock_service=mock.MagicMock()<line_sep>mock_service.membership=mock.MagicMock(return_value=mock_request)<line_sep>mock_discovery_build.return_value=mock_service<line_sep>self.assertTrue(utils.IsGroupMember('<EMAIL>' 'group'))<line_sep>mock_service.membership.assert_called_once_with(identity='<EMAIL>' group='group')<block_end>@mock.patch.object(utils 'ServiceAccountHttp' mock.MagicMock())@mock.patch('logging.error')@mock.patch('common.utils.discovery.build')<def_stmt>testIsGroupMember_RequestFails_LogsErrorAndReturnsFalse self mock_discovery_build mock_logging_error<block_start>mock_service=mock.MagicMock()<line_sep>mock_service.membership=mock.MagicMock(return_value={'error':'Some error'})<line_sep>mock_discovery_build.return_value=mock_service<line_sep>self.assertFalse(utils.IsGroupMember('<EMAIL>' 'group'))<line_sep>self.assertEqual(1 mock_logging_error.call_count)<block_end><def_stmt>testGetSheriffForAutorollCommit_InvalidCommit_ReturnsNone self<block_start>self.assertIsNone(utils.GetSheriffForAutorollCommit(<none>))<line_sep>self.assertIsNone(utils.GetSheriffForAutorollCommit({}))<line_sep>self.assertIsNone(utils.GetSheriffForAutorollCommit({'author':{}}))<block_end><def_stmt>testGetSheriffForAutorollCommit_NotAutoroll_ReturnsNone self<block_start>self.assertIsNone(utils.GetSheriffForAutorollCommit({'author':{'email':'<EMAIL>'} 'message':'TBR=<EMAIL>' }))<line_sep>self.assertIsNone(utils.GetSheriffForAutorollCommit({'author':{'email':'<EMAIL>'} 'message':'TBR=<EMAIL>' }))<block_end><def_stmt>testGetSheriffForAutorollCommit_AutoRoll_ReturnsSheriff self<block_start>self.assertEqual('<EMAIL>' utils.GetSheriffForAutorollCommit({'author':{'email':'<EMAIL>' } 'message':'This is a roll.\n\nTBR=<EMAIL>,<EMAIL>\n\n' }))<line_sep>self.assertEqual('<EMAIL>' utils.GetSheriffForAutorollCommit({'author':{'email':'<EMAIL>' } 'message':'TBR=<EMAIL>' }))<line_sep>self.assertEqual('<EMAIL>' utils.GetSheriffForAutorollCommit({'tbr':'<EMAIL>'}))<block_end><block_end><def_stmt>_MakeMockFetch base64_encoded=<true> status=200<block_start>"""Returns a mock fetch object that returns a canned response."""<def_stmt>_MockFetch _<block_start>response_text=json.dumps({'key':'this is well-formed 
JSON.'})<if_stmt>base64_encoded<block_start>response_text=base64.b64encode(response_text)<block_end><return>testing_common.FakeResponseObject(status response_text)<block_end><return>_MockFetch<block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
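# Hedged sketch of the matching semantics asserted above (hypothetical helper, not dashboard code): each '/'-separated part of the test path must match its pattern part in full, and '*' matches zero or more characters within a part; fnmatch.fnmatchcase reproduces that behaviour for the patterns these tests use. <import_stmt>fnmatch<def_stmt>matches_pattern_sketch test_path pattern<block_start>parts=test_path.split('/')<line_sep>pats=pattern.split('/')<if_stmt>len(parts)<eq>len(pats)<block_start><return>all([fnmatch.fnmatchcase(part pat)<for>part,pat zip(parts pats)])<block_end><return><false><block_end><assert_stmt>matches_pattern_sketch('ChromiumPerf/cros-one/dromaeo.top25/Total' 'Chromium*/cros-one*/*.*/To*al')<line_sep><assert_stmt><not>matches_pattern_sketch('ChromiumPerf/cros-one/dromaeoXtop25/Total' 'ChromiumPerf/cros-*/dromaeo.*/Total')<line_sep>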
<import_stmt>argparse<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('integers' metavar='N' type=int nargs='+')<line_sep>parser.add_argument('-f' '--foo' help='foo help')<line_sep>parser.add_argument('-b' '--bar' help='bar help')<line_sep>parser.add_argument('-z' '--baz' help='baz help')<line_sep>parser.add_argument('-t' '--turn-on' action='store_true')<line_sep>parser.add_argument('-x' '--exclude' action='store_false')<line_sep>parser.add_argument('-s' '--start' action='store_true')<line_sep>args=parser.parse_args()<line_sep>print(args)<line_sep>
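# Hedged usage sketch appended for illustration: parse_args also accepts an explicit argv list, which makes the flag semantics visible; the 'store_true' options default to False and flip to True when given, while the 'store_false' option '-x' defaults to True. sample=parser.parse_args(['1' '2' '3' '--foo' 'F' '-t'])<line_sep>print(sample.integers sample.foo sample.turn_on sample.exclude)# prints: [1, 2, 3] F True True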
# terrascript/data/rancher/rancher2.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:25:37 UTC) <import_stmt>terrascript<class_stmt>rancher2_app(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_catalog(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_catalog_v2(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_certificate(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_cloud_credential(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_cluster(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_cluster_alert_group(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_cluster_alert_rule(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_cluster_driver(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_cluster_logging(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_cluster_role_template_binding(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_cluster_scan(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_cluster_template(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_cluster_v2(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_etcd_backup(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_global_dns_provider(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_global_role(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_global_role_binding(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_multi_cluster_app(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_namespace(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_node_driver(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_node_pool(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_node_template(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_notifier(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_pod_security_policy_template(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_project(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_project_alert_group(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_project_alert_rule(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_project_logging(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_project_role_template_binding(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_registry(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_role_template(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_secret(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_secret_v2(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_setting(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_storage_class_v2(terrascript.Data)<block_start><pass><block_end><class_stmt>rancher2_user(terrascript.Data)<block_start><pass><block_end>__all__=["rancher2_app" "rancher2_catalog" "rancher2_catalog_v2" "rancher2_certificate" "rancher2_cloud_credential" "rancher2_cluster" "rancher2_cluster_alert_group" "rancher2_cluster_alert_rule" "rancher2_cluster_driver" "rancher2_cluster_logging" "rancher2_cluster_role_template_binding" "rancher2_cluster_scan" "rancher2_cluster_template" "rancher2_cluster_v2" 
"rancher2_etcd_backup" "rancher2_global_dns_provider" "rancher2_global_role" "rancher2_global_role_binding" "rancher2_multi_cluster_app" "rancher2_namespace" "rancher2_node_driver" "rancher2_node_pool" "rancher2_node_template" "rancher2_notifier" "rancher2_pod_security_policy_template" "rancher2_project" "rancher2_project_alert_group" "rancher2_project_alert_rule" "rancher2_project_logging" "rancher2_project_role_template_binding" "rancher2_registry" "rancher2_role_template" "rancher2_secret" "rancher2_secret_v2" "rancher2_setting" "rancher2_storage_class_v2" "rancher2_user" ]<line_sep>
<def_stmt>StringVersion seq<block_start><return>'.'.join(['%s']<times>len(seq))%tuple(seq)<block_end><def_stmt>TupleVersion version_str# 'version_str' avoids shadowing the builtin 'str' <block_start><return>[int(part)<for>part version_str.split('.')]<block_end># a list comprehension rather than map(), so the result is indexable and comparable on Python 3 as well
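# Hedged usage sketch: the two helpers round-trip a dotted version string. <assert_stmt>StringVersion([1 2 3])<eq>'1.2.3'<line_sep><assert_stmt>TupleVersion('1.2.3')<eq>[1 2 3]<line_sep>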
<import_from_stmt>common.sagemaker_component SageMakerComponent SageMakerJobStatus<import_from_stmt>deploy.src.sagemaker_deploy_spec SageMakerDeploySpec<import_from_stmt>deploy.src.sagemaker_deploy_component EndpointRequests SageMakerDeployComponent <import_from_stmt>tests.unit_tests.tests.deploy.test_deploy_spec DeploySpecTestCase<import_stmt>unittest<import_from_stmt>unittest.mock patch MagicMock ANY<class_stmt>DeployComponentTestCase(unittest.TestCase)<block_start>REQUIRED_ARGS=DeploySpecTestCase.REQUIRED_ARGS<line_sep>@classmethod<def_stmt>setUp cls<block_start>cls.component=SageMakerDeployComponent()<line_sep># Instantiate without calling Do() cls.component._endpoint_config_name="endpoint-config"<line_sep>cls.component._endpoint_name="endpoint"<line_sep>cls.component._should_update_existing=<false><block_end>@patch("deploy.src.sagemaker_deploy_component.super" MagicMock())<def_stmt>test_do_sets_name self<block_start>given_endpoint_name=SageMakerDeploySpec(self.REQUIRED_ARGS+["--endpoint_name" "my-endpoint"])<line_sep>given_endpoint_config_name=SageMakerDeploySpec(self.REQUIRED_ARGS+["--endpoint_config_name" "my-endpoint-config"])<line_sep>unnamed_spec=SageMakerDeploySpec(self.REQUIRED_ARGS)<with_stmt>patch("deploy.src.sagemaker_deploy_component.SageMakerComponent._generate_unique_timestamped_id" MagicMock(return_value="-generated") )<block_start>self.component.Do(given_endpoint_name)<line_sep>self.assertEqual("EndpointConfig-generated" self.component._endpoint_config_name)<line_sep>self.assertEqual("my-endpoint" self.component._endpoint_name)<line_sep>self.component.Do(given_endpoint_config_name)<line_sep>self.assertEqual("my-endpoint-config" self.component._endpoint_config_name)<line_sep>self.assertEqual("Endpoint-generated" self.component._endpoint_name)<line_sep>self.component.Do(unnamed_spec)<line_sep>self.assertEqual("EndpointConfig-generated" self.component._endpoint_config_name)<line_sep>self.assertEqual("Endpoint-generated" self.component._endpoint_name)<block_end><block_end>@patch("deploy.src.sagemaker_deploy_component.super" MagicMock())<def_stmt>test_update_endpoint_do_sets_name self<block_start>given_endpoint_name=SageMakerDeploySpec(self.REQUIRED_ARGS+["--endpoint_name" "my-endpoint" "--update_endpoint" "True"])<line_sep>given_endpoint_config_name=SageMakerDeploySpec(self.REQUIRED_ARGS+["--endpoint_config_name" "my-endpoint-config" "--update_endpoint" "True" ])<line_sep>unnamed_spec=SageMakerDeploySpec(self.REQUIRED_ARGS)<line_sep>SageMakerDeployComponent._generate_unique_timestamped_id=MagicMock(return_value="-generated-update")<line_sep>self.component._endpoint_name_exists=MagicMock(return_value=<true>)<line_sep>self.component._get_endpoint_config=MagicMock(return_value="existing-config")<with_stmt>patch("deploy.src.sagemaker_deploy_component.SageMakerComponent._generate_unique_timestamped_id" MagicMock(return_value="-generated-update") )<block_start>self.component.Do(given_endpoint_name)<line_sep>self.assertEqual("EndpointConfig-generated-update" self.component._endpoint_config_name)<line_sep>self.assertEqual("my-endpoint" self.component._endpoint_name)<line_sep>self.assertTrue(self.component._should_update_existing)<line_sep># Ignore given endpoint config name for update self.component.Do(given_endpoint_config_name)<line_sep>self.assertEqual("EndpointConfig-generated-update" self.component._endpoint_config_name)<line_sep>self.assertEqual("Endpoint-generated-update" 
self.component._endpoint_name)<line_sep>self.assertTrue(self.component._should_update_existing)<line_sep>self.component.Do(unnamed_spec)<line_sep>self.assertEqual("EndpointConfig-generated-update" self.component._endpoint_config_name)<line_sep>self.assertEqual("Endpoint-generated-update" self.component._endpoint_name)<line_sep>self.assertFalse(self.component._should_update_existing)<block_end><block_end><def_stmt>test_create_deploy_job_requests self<block_start>spec=SageMakerDeploySpec(self.REQUIRED_ARGS)<line_sep>request=self.component._create_job_request(spec.inputs spec.outputs)<line_sep>self.assertEqual(request EndpointRequests(config_request={"EndpointConfigName":"endpoint-config" "ProductionVariants":[{"VariantName":"variant-name-1" "ModelName":"model-test" "InitialInstanceCount":1 "InstanceType":"ml.m4.xlarge" "InitialVariantWeight":1.0 }] "Tags":[] } endpoint_request={"EndpointName":"endpoint" "EndpointConfigName":"endpoint-config" } ) )<block_end><def_stmt>test_create_update_deploy_job_requests self<block_start>spec=SageMakerDeploySpec(self.REQUIRED_ARGS)<line_sep>self.component._should_update_existing=<true><line_sep>request=self.component._create_job_request(spec.inputs spec.outputs)<line_sep>self.assertEqual(request EndpointRequests(config_request={"EndpointConfigName":"endpoint-config" "ProductionVariants":[{"VariantName":"variant-name-1" "ModelName":"model-test" "InitialInstanceCount":1 "InstanceType":"ml.m4.xlarge" "InitialVariantWeight":1.0 }] "Tags":[] } endpoint_request={"EndpointName":"endpoint" "EndpointConfigName":"endpoint-config" } ) )<block_end><def_stmt>test_create_deploy_job_multiple_variants self<block_start>spec=SageMakerDeploySpec(self.REQUIRED_ARGS+["--variant_name_1" "variant-test-1" "--initial_instance_count_1" "1" "--instance_type_1" "t1" "--initial_variant_weight_1" "0.1" "--accelerator_type_1" "ml.eia1.medium" "--model_name_2" "model-test-2" "--variant_name_2" "variant-test-2" "--initial_instance_count_2" "2" "--instance_type_2" "t2" "--initial_variant_weight_2" "0.2" "--accelerator_type_2" "ml.eia1.large" ])<line_sep>request=self.component._create_job_request(spec.inputs spec.outputs)<line_sep>self.assertEqual(request EndpointRequests(config_request={"EndpointConfigName":"endpoint-config" "ProductionVariants":[{"VariantName":"variant-test-1" "ModelName":"model-test" "InitialInstanceCount":1 "InstanceType":"t1" "InitialVariantWeight":0.1 "AcceleratorType":"ml.eia1.medium" } {"VariantName":"variant-test-2" "ModelName":"model-test-2" "InitialInstanceCount":2 "InstanceType":"t2" "InitialVariantWeight":0.2 "AcceleratorType":"ml.eia1.large" } ] "Tags":[] } endpoint_request={"EndpointName":"endpoint" "EndpointConfigName":"endpoint-config" } ) )<block_end><def_stmt>test_get_job_status self<block_start>self.component._sm_client=mock_client=MagicMock()<line_sep>self.component._sm_client.describe_endpoint.return_value={"EndpointStatus":"Creating"}<line_sep>self.assertEqual(self.component._get_job_status() SageMakerJobStatus(is_completed=<false> raw_status="Creating") )<line_sep>self.component._sm_client.describe_endpoint.return_value={"EndpointStatus":"Updating"}<line_sep>self.assertEqual(self.component._get_job_status() SageMakerJobStatus(is_completed=<false> raw_status="Updating") )<line_sep>self.component._sm_client.describe_endpoint.return_value={"EndpointStatus":"InService"}<line_sep>self.assertEqual(self.component._get_job_status() SageMakerJobStatus(is_completed=<true> raw_status="InService") 
)<line_sep>self.component._sm_client.describe_endpoint.return_value={"EndpointStatus":"Failed" "FailureReason":"lolidk" }<line_sep>self.assertEqual(self.component._get_job_status() SageMakerJobStatus(is_completed=<true> raw_status="Failed" has_error=<true> error_message="lolidk" ) )<block_end><def_stmt>test_after_job_completed self<block_start>spec=SageMakerDeploySpec(self.REQUIRED_ARGS)<line_sep>self.component._after_job_complete({} {} spec.inputs spec.outputs)<line_sep>self.assertEqual(spec.outputs.endpoint_name "endpoint")<block_end><def_stmt>test_submit_update_job_request self<block_start>self.component._should_update_existing=<true><line_sep>self.component._existing_endpoint_config_name="old-config"<line_sep>self.component._delete_endpoint_config=MagicMock(return_value=<true>)<line_sep>self.component._sm_client=MagicMock()<line_sep>requests=EndpointRequests(config_request={"EndpointConfigName":"endpoint-config" "ProductionVariants":[{"VariantName":"variant-test-1" "ModelName":"model-test" "InitialInstanceCount":1 "InstanceType":"t1" "InitialVariantWeight":0.1 "AcceleratorType":"ml.eia1.medium" } {"VariantName":"variant-test-2" "ModelName":"model-test-2" "InitialInstanceCount":2 "InstanceType":"t2" "InitialVariantWeight":0.2 "AcceleratorType":"ml.eia1.large" } ] "Tags":[] } endpoint_request={"EndpointName":"endpoint" "EndpointConfigName":"endpoint-config" } )<line_sep>self.component._submit_job_request(requests)<line_sep>self.component._sm_client.update_endpoint.assert_called_once_with(**{"EndpointName":"endpoint" "EndpointConfigName":"endpoint-config" })<line_sep>self.component._delete_endpoint_config.assert_called_once_with("old-config")<block_end><block_end>
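# Hedged standalone sketch (mirroring only what the mocks above assert, not the component's real implementation) of how a describe_endpoint response maps to SageMakerJobStatus: 'InService' and 'Failed' are terminal, and only 'Failed' carries an error message. <def_stmt>status_from_description_sketch description<block_start>raw=description["EndpointStatus"]<if_stmt>raw<eq>"Failed"<block_start><return>SageMakerJobStatus(is_completed=<true> raw_status=raw has_error=<true> error_message=description["FailureReason"])<block_end><return>SageMakerJobStatus(is_completed=(raw<eq>"InService") raw_status=raw)<block_end>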
""" Tests for admin endpoints """<import_from_stmt>mock MagicMock<import_from_stmt>pyramid.httpexceptions HTTPBadRequest<import_from_stmt>pypicloud.views.admin AdminEndpoints<import_from_stmt>. MockServerTest<class_stmt>TestAdmin(MockServerTest)<block_start>"""Tests for admin endpoints"""<def_stmt>setUp self<block_start>super(TestAdmin self).setUp()<line_sep>self.access=self.request.access=MagicMock()<block_end><def_stmt>test_rebuild self<block_start>"""Rebuild endpoint refreshes db cache"""<line_sep>self.request.db=MagicMock()<line_sep>AdminEndpoints(self.request).rebuild_package_list()<line_sep>self.assertTrue(self.request.db.reload_from_storage.called)<block_end><def_stmt>test_get_pending_users self<block_start>"""Retrieve pending users from access backend"""<line_sep>ret=AdminEndpoints(self.request).get_pending_users()<line_sep>self.assertEqual(ret self.access.pending_users())<block_end><def_stmt>test_get_users self<block_start>"""Retrieve all users from access backend"""<line_sep>ret=AdminEndpoints(self.request).get_users()<line_sep>self.assertEqual(ret self.access.user_data())<block_end><def_stmt>test_get_user self<block_start>"""Retrieve data for a single user"""<line_sep>self.request.named_subpaths={"username":"a"}<line_sep>ret=AdminEndpoints(self.request).get_user()<line_sep>self.access.user_data.assert_called_with("a")<line_sep>self.assertEqual(ret self.access.user_data())<block_end><def_stmt>test_delete_user self<block_start>"""Delete user from access backend"""<line_sep>self.request.named_subpaths={"username":"a"}<line_sep>AdminEndpoints(self.request).delete_user()<line_sep>self.access.delete_user.assert_called_with("a")<block_end><def_stmt>test_approve_user self<block_start>"""Approve a pending user"""<line_sep>self.request.named_subpaths={"username":"a"}<line_sep>AdminEndpoints(self.request).approve_user()<line_sep>self.access.approve_user.assert_called_with("a")<block_end><def_stmt>test_set_admin_status self<block_start>"""Set the admin flag for a user"""<line_sep>self.request.named_subpaths={"username":"a"}<line_sep>AdminEndpoints(self.request).set_admin_status(<true>)<line_sep>self.access.set_user_admin.assert_called_with("a" <true>)<block_end><def_stmt>test_add_group_member self<block_start>"""Add a user to a group"""<line_sep>self.request.named_subpaths={"username":"a" "group":"b"}<line_sep>self.request.method="PUT"<line_sep>AdminEndpoints(self.request).mutate_group_member()<line_sep>self.access.edit_user_group.assert_called_with("a" "b" <true>)<block_end><def_stmt>test_remove_group_member self<block_start>"""Remove a user from a group"""<line_sep>self.request.named_subpaths={"username":"a" "group":"b"}<line_sep>self.request.method="DELETE"<line_sep>AdminEndpoints(self.request).mutate_group_member()<line_sep>self.access.edit_user_group.assert_called_with("a" "b" <false>)<block_end><def_stmt>test_get_groups self<block_start>"""Retrieve list of all groups"""<line_sep>ret=AdminEndpoints(self.request).get_groups()<line_sep>self.assertEqual(ret self.access.groups())<block_end><def_stmt>test_delete_group self<block_start>"""Delete a group"""<line_sep>self.request.named_subpaths={"group":"a"}<line_sep>AdminEndpoints(self.request).delete_group()<line_sep>self.access.delete_group.assert_called_with("a")<block_end><def_stmt>test_get_user_permissions self<block_start>"""Get a user's permissions from the access 
backend"""<line_sep>self.request.named_subpaths={"username":"a"}<line_sep>ret=AdminEndpoints(self.request).get_user_permissions()<line_sep>self.access.user_package_permissions.assert_called_with("a")<line_sep>self.assertEqual(ret self.access.user_package_permissions())<block_end><def_stmt>test_get_group self<block_start>"""Get a group's members and permissions"""<line_sep>self.request.named_subpaths={"group":"a"}<line_sep>ret=AdminEndpoints(self.request).get_group()<line_sep>self.access.group_members.assert_called_with("a")<line_sep>self.access.group_package_permissions.assert_called_with("a")<line_sep>self.assertEqual(ret {"members":self.access.group_members() "packages":self.access.group_package_permissions() } )<block_end><def_stmt>test_get_package_permissions self<block_start>"""Get user and group permissions for a package"""<line_sep>self.request.named_subpaths={"package":"a"}<line_sep>self.access.user_permissions.return_value={"u1":["read"]}<line_sep>self.access.group_permissions.return_value={"g1":["read" "write"]}<line_sep>ret=AdminEndpoints(self.request).get_package_permissions()<line_sep>self.assertEqual(ret {"user":[{"username":"u1" "permissions":["read"]}] "group":[{"group":"g1" "permissions":["read" "write"]}] } )<block_end><def_stmt>test_create_group self<block_start>"""Create a group"""<line_sep>self.request.named_subpaths={"group":"a"}<line_sep>AdminEndpoints(self.request).create_group()<line_sep>self.access.create_group.assert_called_with("a")<block_end><def_stmt>test_no_create_everyone_group self<block_start>"""Cannot create the 'everyone' group"""<line_sep>self.request.named_subpaths={"group":"everyone"}<line_sep>ret=AdminEndpoints(self.request).create_group()<line_sep>self.assertTrue(isinstance(ret HTTPBadRequest))<block_end><def_stmt>test_no_create_authenticated_group self<block_start>"""Cannot create the 'authenticated' group"""<line_sep>self.request.named_subpaths={"group":"authenticated"}<line_sep>ret=AdminEndpoints(self.request).create_group()<line_sep>self.assertTrue(isinstance(ret HTTPBadRequest))<block_end><def_stmt>test_add_user_permission self<block_start>"""Add a user permission to a package"""<line_sep>self.request.named_subpaths={"type":"user" "package":"p" "name":"u" "permission":"read" }<line_sep>self.request.method="PUT"<line_sep>AdminEndpoints(self.request).edit_permission()<line_sep>self.access.edit_user_permission.assert_called_with("p" "u" "read" <true>)<block_end><def_stmt>test_remove_user_permission self<block_start>"""Remove a user permission from a package"""<line_sep>self.request.named_subpaths={"type":"user" "package":"p" "name":"u" "permission":"read" }<line_sep>self.request.method="DELETE"<line_sep>AdminEndpoints(self.request).edit_permission()<line_sep>self.access.edit_user_permission.assert_called_with("p" "u" "read" <false>)<block_end><def_stmt>test_add_group_permission self<block_start>"""Add a group permission to a package"""<line_sep>self.request.named_subpaths={"type":"group" "package":"p" "name":"g" "permission":"read" }<line_sep>self.request.method="PUT"<line_sep>AdminEndpoints(self.request).edit_permission()<line_sep>self.access.edit_group_permission.assert_called_with("p" "g" "read" <true>)<block_end><def_stmt>test_remove_group_permission self<block_start>"""Remove a group permission from a package"""<line_sep>self.request.named_subpaths={"type":"group" "package":"p" "name":"g" "permission":"read" 
}<line_sep>self.request.method="DELETE"<line_sep>AdminEndpoints(self.request).edit_permission()<line_sep>self.access.edit_group_permission.assert_called_with("p" "g" "read" <false>)<block_end><def_stmt>test_toggle_allow_register self<block_start>"""Toggle registration enabled"""<line_sep>AdminEndpoints(self.request).toggle_allow_register(<true>)<line_sep>self.access.set_allow_register.assert_called_with(<true>)<block_end><block_end>
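# Hedged standalone sketch (not pypicloud code) of the reshaping asserted in test_get_package_permissions above: the access backend's per-name permission mapping is flattened into a list of dicts for the API response. user_perms={'u1':['read']}<line_sep>reshaped=[{'username':u 'permissions':p}<for>u,p user_perms.items()]<line_sep><assert_stmt>reshaped<eq>[{'username':'u1' 'permissions':['read']}]<line_sep>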
"""Make unit test for poly1305_reduce()"""<import_from_stmt>common counter make_main split32<def_stmt>make_test value<block_start>result=value%(2<power>130-5)<line_sep>h_in=split32(value 5)<line_sep>h_out=split32(result 5)<line_sep>print("")<line_sep>print("void test_%d() {"%next(counter))<line_sep>print(" uint32_t h[5] = {"+", ".join(h_in)+"};")<line_sep>print(" const uint32_t expected_h[5] = {"+", ".join(h_out)+"};")<line_sep>print("")<line_sep>print(" poly1305_reduce(h);")<line_sep>print(" assert(memcmp(h, expected_h, sizeof(h)) == 0);")<line_sep>print("}")<line_sep>print("")<block_end>print("#ifdef NDEBUG")<line_sep>print("#undef NDEBUG")<line_sep>print("#endif")<line_sep>print("#include <assert.h>")<line_sep>print("#include <string.h>")<line_sep>print("#include <stdint.h>")<line_sep>print("#include <stdio.h>")<line_sep>print()<line_sep>print("void poly1305_reduce(uint32_t h[5]);")<line_sep>make_test(0)<line_sep>make_test(2<power>130-5-1)<line_sep>make_test(2<power>130-5)<line_sep>make_test(2<power>130-5+1)<line_sep>make_test(2<times>(2<power>130-5))<line_sep>make_test(2<times>(2<power>130-5)+9)<line_sep># make_test(2*(2**130-5)+10) - Fails, since h[5] takes more than 3 bits make_main()<line_sep>
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. <import_stmt>dace<import_from_stmt>dace.memlet Memlet<import_stmt>numpy<as>np<line_sep>sr=dace.SDFG('strided_range_test')<line_sep>s0=sr.add_state('s0')<line_sep>A=s0.add_array('A' [2 16 4] dace.float32)<line_sep>B=s0.add_array('B' [16] dace.float32)<line_sep>tasklet=s0.add_tasklet('srtest' {'a'} {'b'} """ b[0] = a[0,0] * 2 b[1] = a[0,1] * 2 b[2] = a[1,0] * 2 b[3] = a[1,1] * 2 """)<line_sep>me,mx=s0.add_map('srmap' dict(i='0:4'))<line_sep># Reading A at [1, 2*i:2*i+10:8:2, 3] s0.add_memlet_path(A me tasklet dst_conn='a' memlet=Memlet.simple(A '1, 2*i:2*i+10:8:2, 3'))<line_sep># Writing B at [4*i:4*i+4] s0.add_memlet_path(tasklet mx B src_conn='b' memlet=Memlet.simple(B '4*i:4*i+4'))<def_stmt>test <block_start>print('Strided range tasklet test')<line_sep>A=np.random.rand(2 16 4).astype(np.float32)<line_sep>B=np.random.rand(16).astype(np.float32)<line_sep>sr(A=A B=B)<line_sep>diffs=[B[0:2]-2<times>A[1 0:2 3] B[2:4]-2<times>A[1 8:10 3] B[4:6]-2<times>A[1 2:4 3] B[6:8]-2<times>A[1 10:12 3] B[8:10]-2<times>A[1 4:6 3] B[10:12]-2<times>A[1 12:14 3] B[12:14]-2<times>A[1 6:8 3] B[14:16]-2<times>A[1 14:16 3]]<line_sep>diff=np.linalg.norm(np.array(diffs))<line_sep>print('Differences:' [np.linalg.norm(d)<for>d diffs])<assert_stmt>diff<le>1e-5<block_end><if_stmt>__name__<eq>"__main__"<block_start>test()<block_end>
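# A short sketch of my reading of the begin:end:stride:tile subset
# notation in the memlet above: tiles of `tile` consecutive indices
# starting at begin, begin+stride, ... while they still fit below end.
# strided_range_indices is a hypothetical helper for illustration only.
def strided_range_indices(begin, end, stride, tile):
    idx = []
    start = begin
    while start + tile <= end:
        idx.extend(range(start, start + tile))
        start += stride
    return idx

# For i = 0, '1, 2*i:2*i+10:8:2, 3' touches rows [0, 1, 8, 9] of the
# middle dimension, which is exactly what the `diffs` list in test()
# pairs with B[0:2] and B[2:4].
assert strided_range_indices(0, 10, 8, 2) == [0, 1, 8, 9]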
<import_from_stmt>builtins zip<import_from_stmt>builtins range<import_stmt>numpy<as>np<def_stmt>save_data_regresssion # n = 20 # number of labeled/training data # D = 1 # dimension of input data <block_start>x=np.array([[2.083970427750732 -0.821018066101379 -0.617870699182597 -1.183822608860694 0.274087442277144 0.599441729295593 1.768897919204435 -0.465645549031928 0.588852784375935 -0.832982214438054 -0.512106527960363 0.277883144210116 -0.065870426922211 -0.821412363806325 0.185399443778088 -0.858296174995998 0.370786630037059 -1.409869162416639 -0.144668412325022 -0.553299615220374]]).T<line_sep>y=np.array([[4.549203746331698 0.371985574437271 0.711307965514790 -0.013212893618430 2.255473255338191 1.009915749295733 3.744675937965029 0.424592771793202 1.322833652295811 0.278298293510020 0.267229130945574 2.200112286723833 1.200609983308969 0.439971697236094 2.628580433511255 0.503774817336353 1.942525313820564 0.579133950013327 0.670874423968554 0.377353755100965]]).T<line_sep># TEST points # test points evenly distributed in the interval [-2, 2.5] xstar=np.array(list(range(-200 250 4)) dtype=np.float64 ndmin=2).T<line_sep>xstar<augdiv>100<line_sep>np.savez('Regression/regression_data' x=x y=y xstar=xstar)<block_end><def_stmt>save_data_classification # Synthetic data for binary classification: two partially overlapping # Gaussians in two dimensions. 120 data points are generated from two # Gaussians with different means and covariances. One Gaussian is # isotropic and contains 2/3 of the data (blue), the other is highly # correlated and contains 1/3 of the points (red). Note, that the # labels for the targets are -1/+1 (and not 0/1). <block_start>n1=80<line_sep>n2=40<line_sep>x1=np.array([[0.089450165731417 -0.000700765006939] [1.171605560541542 1.177765337635947] [1.404722675089394 -0.017417915887421] [0.556096196907929 -1.489370243839215] [1.213163445267992 0.044545401368647] [0.173404742510759 -0.675668036759603] [2.225008556585363 0.469803193769368] [1.470329290331445 0.887642323697526] [2.715199208821485 0.621044646503113] [0.173640760494328 -0.936054178730056] [2.038152815025167 0.262587298316711] [1.670218375320427 -2.633186886994263] [0.270098501389591 -0.948779657473203] [1.396339236138275 -1.114992287201776] [-1.482070589718501 -0.654590652482805] [-1.493788226272929 0.382017940248275] [1.025083846875763 -0.860344923788873] [0.750316336734172 -0.101864205602753] [0.184311310148912 -0.258523866245887] [0.221868667121623 -1.393954437105630] [2.258881477897777 -0.786806071526136] [1.211362530151533 -0.423431246029886] [1.525307406741207 -0.097975367602030] [0.978930232706465 0.476154349549524] [1.347884229346280 -0.248408186838667] [1.205779546204216 -0.090878327349907] [0.124388644862000 0.599612645000285] [0.784044356662233 0.356596736271853] [1.060216683845210 -0.318474838087900] [1.678114484474938 0.678735373910422] [0.973851135005570 0.024880700382574] [0.016237746864886 -0.480899874254564] [0.979406721923196 0.697708815321128] [2.217307638531248 -0.956931847027775] [2.150475558834153 1.059031573329512] [1.050502393215048 0.532141747419667] [1.210593098269218 -0.318123542280113] [0.426309208807901 -0.571727978045793] [0.742552105732714 -0.122112766396886] [0.757210723588679 0.862002000781123] [-0.431639130160791 -0.763118261936640] [-0.748398486307095 -0.603667649379360] [0.975086541108249 -1.525297946453790] [0.074503762788667 -0.092155036190678] [-0.668889572018935 1.305400680048752] [0.725632503186580 0.096286255882168] [-1.042270707136463 1.297009698531055] 
[1.943144890398260 -1.051176922438962] [1.191448645802597 0.261349747400059] [0.778004017505022 -1.046301123377022] [0.628873970760607 1.103926629619643] [1.295113890591403 -0.479519217798997] [1.522065175744686 0.993476032742058] [1.100255776045601 0.961069161713818] [-0.593243832838153 -0.479418953496258] [2.023196521366462 -0.275055494808503] [-0.788103134597041 -1.090707985778480] [-0.085168420896236 1.226858390046108] [1.691706923196703 -1.153144804780540] [1.989279380395157 1.974704317386435] [0.398799861652602 3.051291814188982] [-0.707217210772927 0.185505264874794] [0.697550136765320 0.222287208720035] [2.186126058382323 -0.327829143438683] [1.368068331060010 1.708138258453435] [0.883049126818189 -1.334269372314072] [1.737643116893527 0.618452933813739] [2.002228743955222 0.103381966018445] [-0.202638622737115 0.495024938090909] [0.543309203560769 -0.802120609128192] [-1.796161599703804 -0.054795478648902] [1.460693782000059 0.750052171180825] [0.133277872804608 -1.154891068006907] [0.203670382700157 -0.480336687666025] [-0.278985011909341 0.030578590108392] [2.070490237052893 2.420782751903098] [0.599023881366768 -1.673208560658818] [0.140506592147238 0.804938444757444] [-0.980799204108985 -1.847987723222053] [-0.102350006007740 -0.822093851434857]])<line_sep>x2=np.array([[1.160257057434194 1.544111720606185] [-0.458434595629321 0.205667827100987] [-1.053562345687376 -0.614938261650010] [-1.687901005751336 -0.780028275457715] [-0.467035854712698 0.561692074343868] [-0.703391186121452 0.281301267639200] [-1.568557779993616 -0.629129013661319] [-2.176478596101226 -1.176211396013793] [0.768109265900499 1.376893437232103] [-0.514772970064353 0.474264363701950] [-1.301924381487904 -0.525179228127957] [-1.312024947004566 -0.049469442305628] [-0.623417800418214 0.226456899059445] [0.020290591370131 0.374055846421580] [-1.002901826023476 0.076597486786743] [-2.553713136283273 -1.731788289864902] [-1.788156378743716 -0.742460481943494] [-1.119582270077321 -0.256154464598782] [-0.423084091988017 0.395108309297119] [-1.645945345460644 -1.216319293733455] [0.227805611684674 0.925948003854262] [-1.298719171366801 -0.965511301629466] [-0.618292817021891 0.140045887498202] [0.794935039731655 1.917830760420081] [-0.213709179946402 0.617751634356751] [-0.474251035850546 -0.054854432018974] [0.056077816960464 1.046282980014428] [0.887136693467512 1.536490289895764] [1.377161915854166 1.764872700787871] [-0.901195709427863 -0.340855547886558] [-0.783104424735034 -0.330927422324566] [-1.507139570543989 0.137504213149820] [-0.348999111724700 0.235931187612453] [-0.367309385513174 0.655996377722041] [-0.050622309620072 0.410969334468070] [1.734919039047271 2.611080177877894] [-0.567413078682755 -0.458249564234885] [-0.622230797920433 0.258401595566888] [-1.642146761593230 -1.138579130251617] [-0.285298076847255 0.085451489400687]])<line_sep>x=np.concatenate((x1 x2) axis=0)<line_sep>y=np.concatenate((-np.ones((1 n1)) np.ones((1 n2))) axis=1).T<line_sep># For plotting, we superimpose the data points with the posterior equi-probability contour # lines for the probability of class two given complete information about the generating mechanism. 
t1,t2=np.meshgrid(np.arange(-4 4.1 0.1) np.arange(-4 4.1 0.1))<line_sep>t=np.array(list(zip(np.reshape(t1 (np.prod(t1.shape) )) np.reshape(t2 (np.prod(t2.shape) )))))# these are the test inputs n=t.shape[0]<line_sep>tmm=np.zeros_like(t)<line_sep>S1=np.eye(2)<line_sep>S2=np.array([[1 0.95] [0.95 1]])<line_sep>m1=np.array([0.75 0])<line_sep>m2=np.array([-0.75 0])<line_sep>tmm[: 0]=t[: 0]-m1[0]<line_sep>tmm[: 1]=t[: 1]-m1[1]<line_sep>p1=n1<times>np.exp((-np.dot(tmm np.linalg.inv(S1))<times>tmm/2).sum(axis=1))<line_sep>tmm[: 0]=t[: 0]-m2[0]<line_sep>tmm[: 1]=t[: 1]-m2[1]<line_sep>S2i=np.linalg.inv(S2)<line_sep>p2=n2<times>np.exp((-np.dot(tmm S2i)<times>tmm/2).sum(axis=1))/np.sqrt(0.0975)<line_sep>np.savez('Classification/classification_data' x=x y=y xstar=t x1=x1 x2=x2 t1=t1 t2=t2 p1=p1 p2=p2)<block_end><if_stmt>__name__<eq>'__main__'<block_start>save_data_regresssion()<line_sep>#save_data_classification() <block_end>
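# p1 and p2 above are class-size-weighted densities evaluated on the
# grid t. Assuming that interpretation, the posterior probability of
# class two, whose 0.5 level set is the equi-probability contour the
# comment refers to, follows directly from Bayes' rule:
import numpy as np

def posterior_class2(p1, p2):
    # Class priors n1/(n1+n2) and n2/(n1+n2) are already folded into
    # p1 and p2, so no extra weighting is needed here.
    return p2 / (p1 + p2)

# e.g. after loading the saved file:
# data = np.load('Classification/classification_data.npz')
# prob2 = posterior_class2(data['p1'], data['p2']).reshape(data['t1'].shape)
# The decision boundary is the contour prob2 == 0.5.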
<import_stmt>torch<import_stmt>torch.optim<as>optim<import_stmt>numpy<as>np<import_from_stmt>PIL Image<line_sep>#import pano <import_stmt>pano_gen<as>pano<import_stmt>time<def_stmt>vecang vec1 vec2<block_start>vec1=vec1/np.sqrt((vec1<power>2).sum())<line_sep>vec2=vec2/np.sqrt((vec2<power>2).sum())<line_sep><return>np.arccos(np.dot(vec1 vec2))<block_end><def_stmt>rotatevec vec theta<block_start>x=vec[0]<times>torch.cos(theta)-vec[1]<times>torch.sin(theta)<line_sep>y=vec[0]<times>torch.sin(theta)+vec[1]<times>torch.cos(theta)<line_sep><return>torch.cat([x y])<block_end><def_stmt>pts_linspace pa pb pts=300<block_start>pa=pa.view(1 2)<line_sep>pb=pb.view(1 2)<line_sep>w=torch.arange(0 pts+1 dtype=pa.dtype).view(-1 1)<line_sep><return>(pa<times>(pts-w)+pb<times>w)/pts<block_end><def_stmt>xyz2uv xy z=-1<block_start>c=torch.sqrt((xy<power>2).sum(1))<line_sep>u=torch.atan2(xy[: 1] xy[: 0]).view(-1 1)<line_sep>v=torch.atan2(torch.zeros_like(c)+z c).view(-1 1)<line_sep><return>torch.cat([u v] dim=1)<block_end><def_stmt>uv2idx uv w h<block_start>col=(uv[: 0]/(2<times>np.pi)+0.5)<times>w-0.5<line_sep>row=(uv[: 1]/np.pi+0.5)<times>h-0.5<line_sep><return>torch.cat([col.view(-1 1) row.view(-1 1)] dim=1)<block_end><def_stmt>wallidx xy w h z1 z2<block_start>col=(torch.atan2(xy[1] xy[0])/(2<times>np.pi)+0.5)<times>w-0.5<line_sep>c=torch.sqrt((xy<power>2).sum())<line_sep>row_s=(torch.atan2(torch.zeros_like(c)+z1 c)/np.pi+0.5)<times>h-0.5<line_sep>row_t=(torch.atan2(torch.zeros_like(c)+z2 c)/np.pi+0.5)<times>h-0.5<line_sep>pa=torch.cat([col.view(1) row_s.view(1)])<line_sep>pb=torch.cat([col.view(1) row_t.view(1)])<line_sep><return>pts_linspace(pa pb)<block_end><def_stmt>map_coordinates input coordinates<block_start>''' PyTorch version of scipy.ndimage.interpolation.map_coordinates input: (H, W) coordinates: (2, ...) 
'''<line_sep>h=input.shape[0]<line_sep>w=input.shape[1]<def_stmt>_coordinates_pad_wrap h w coordinates<block_start>coordinates[0]=coordinates[0]%h<line_sep>coordinates[1]=coordinates[1]%w<line_sep><return>coordinates<block_end>co_floor=torch.floor(coordinates).long()<line_sep>co_ceil=torch.ceil(coordinates).long()<line_sep>d1=(coordinates[1]-co_floor[1].float())<line_sep>d2=(coordinates[0]-co_floor[0].float())<line_sep>co_floor=_coordinates_pad_wrap(h w co_floor)<line_sep>co_ceil=_coordinates_pad_wrap(h w co_ceil)<line_sep>f00=input[co_floor[0] co_floor[1]]<line_sep>f10=input[co_floor[0] co_ceil[1]]<line_sep>f01=input[co_ceil[0] co_floor[1]]<line_sep>f11=input[co_ceil[0] co_ceil[1]]<line_sep>fx1=f00+d1<times>(f10-f00)<line_sep>fx2=f01+d1<times>(f11-f01)<line_sep><return>fx1+d2<times>(fx2-fx1)<block_end><def_stmt>pc2cor_id pc pc_vec pc_theta pc_height<block_start><if_stmt>pc_theta.numel()<eq>1<block_start>ps=torch.stack([(pc+pc_vec) (pc+rotatevec(pc_vec pc_theta)) (pc-pc_vec) (pc+rotatevec(pc_vec pc_theta-np.pi))])<block_end><else_stmt><block_start>ps=pc+pc_vec<line_sep>ps=ps.view(-1 2)<for_stmt>c_num range(pc_theta.shape[1])<block_start>ps=torch.cat((ps ps[c_num: :]) 0)<if_stmt>(c_num%2)<eq>0<block_start>ps[-1 1]=pc_theta[0 c_num]<block_end><else_stmt><block_start>ps[-1 0]=pc_theta[0 c_num]<block_end><block_end>ps=torch.cat((ps ps[-1: :]) 0)<line_sep>ps[-1 1]=ps[0 1]<block_end><return>torch.cat([uv2idx(xyz2uv(ps z=-1) 1024 512) uv2idx(xyz2uv(ps z=pc_height) 1024 512) ] dim=0)<block_end><def_stmt>project2sphere_score pc pc_vec pc_theta pc_height scoreedg scorecor i_step=<none># Sample corner loss <block_start>corid=pc2cor_id(pc pc_vec pc_theta pc_height)<line_sep>corid_coordinates=torch.stack([corid[: 1] corid[: 0]])<line_sep>loss_cor=-map_coordinates(scorecor corid_coordinates).mean()<line_sep># Sample boundary loss <if_stmt>pc_theta.numel()<eq>1<block_start>p1=pc+pc_vec<line_sep>p2=pc+rotatevec(pc_vec pc_theta)<line_sep>p3=pc-pc_vec<line_sep>p4=pc+rotatevec(pc_vec pc_theta-np.pi)<line_sep>segs=[pts_linspace(p1 p2) pts_linspace(p2 p3) pts_linspace(p3 p4) pts_linspace(p4 p1) ]<block_end><else_stmt><block_start>ps=pc+pc_vec<line_sep>ps=ps.view(-1 2)<for_stmt>c_num range(pc_theta.shape[1])<block_start>ps=torch.cat((ps ps[c_num: :]) 0)<if_stmt>(c_num%2)<eq>0<block_start>ps[-1 1]=pc_theta[0 c_num]<block_end><else_stmt><block_start>ps[-1 0]=pc_theta[0 c_num]<block_end><block_end>ps=torch.cat((ps ps[-1: :]) 0)<line_sep>ps[-1 1]=ps[0 1]<line_sep>segs=[]<for_stmt>c_num range(ps.shape[0]-1)<block_start>segs.append(pts_linspace(ps[c_num :] ps[c_num+1 :]))<block_end>segs.append(pts_linspace(ps[-1 :] ps[0 :]))<block_end># ceil-wall loss_ceilwall=0<for_stmt>seg segs<block_start>ceil_uv=xyz2uv(seg z=-1)<line_sep>ceil_idx=uv2idx(ceil_uv 1024 512)<line_sep>ceil_coordinates=torch.stack([ceil_idx[: 1] ceil_idx[: 0]])<line_sep>loss_ceilwall<augsub>map_coordinates(scoreedg[<ellipsis> 1] ceil_coordinates).mean()/len(segs)<block_end># floor-wall loss_floorwall=0<for_stmt>seg segs<block_start>floor_uv=xyz2uv(seg z=pc_height)<line_sep>floor_idx=uv2idx(floor_uv 1024 512)<line_sep>floor_coordinates=torch.stack([floor_idx[: 1] floor_idx[: 0]])<line_sep>loss_floorwall<augsub>map_coordinates(scoreedg[<ellipsis> 2] floor_coordinates).mean()/len(segs)<block_end>#losses = 1.0 * loss_cor + 0.1 * loss_wallwall + 0.5 * loss_ceilwall + 1.0 * loss_floorwall losses=1.0<times>loss_cor+1.0<times>loss_ceilwall+1.0<times>loss_floorwall<if_stmt>i_step<is><not><none><block_start><with_stmt>torch.no_grad()<block_start>print('step %d: 
%.3f (cor %.3f, ceil %.3f, floor %.3f)'%(i_step losses loss_cor loss_ceilwall loss_floorwall))<block_end><block_end><return>losses<block_end><def_stmt>optimize_cor_id cor_id scoreedg scorecor num_iters=100 verbose=<false><block_start><assert_stmt>scoreedg.shape<eq>(512 1024 3)<assert_stmt>scorecor.shape<eq>(512 1024)<line_sep>Z=-1<line_sep>ceil_cor_id=cor_id[0::2]<line_sep>floor_cor_id=cor_id[1::2]<line_sep>ceil_cor_id,ceil_cor_id_xy=pano.constraint_cor_id_same_z(ceil_cor_id scorecor Z)<line_sep>#ceil_cor_id_xyz = np.hstack([ceil_cor_id_xy, np.zeros(4).reshape(-1, 1) + Z]) ceil_cor_id_xyz=np.hstack([ceil_cor_id_xy np.zeros(ceil_cor_id.shape[0]).reshape(-1 1)+Z])<line_sep># TODO: revise here to general layout #pc = (ceil_cor_id_xy[0] + ceil_cor_id_xy[2]) / 2 #print(ceil_cor_id_xy) <if_stmt>abs(ceil_cor_id_xy[0 0]-ceil_cor_id_xy[1 0])<g>abs(ceil_cor_id_xy[0 1]-ceil_cor_id_xy[1 1])<block_start>ceil_cor_id_xy=np.concatenate((ceil_cor_id_xy[1: :] ceil_cor_id_xy[:1 :]) axis=0)<block_end>#print(cor_id) #print(ceil_cor_id_xy) pc=np.mean(ceil_cor_id_xy axis=0)<line_sep>pc_vec=ceil_cor_id_xy[0]-pc<line_sep>pc_theta=vecang(pc_vec ceil_cor_id_xy[1]-pc)<line_sep>pc_height=pano.fit_avg_z(floor_cor_id ceil_cor_id_xy scorecor)<if_stmt>ceil_cor_id_xy.shape[0]<g>4<block_start>pc_theta=np.array([ceil_cor_id_xy[1 1]])<for_stmt>c_num range(2 ceil_cor_id_xy.shape[0]-1)<block_start><if_stmt>(c_num%2)<eq>0<block_start>pc_theta=np.append(pc_theta ceil_cor_id_xy[c_num 0])<block_end><else_stmt><block_start>pc_theta=np.append(pc_theta ceil_cor_id_xy[c_num 1])<block_end><block_end><block_end>scoreedg=torch.FloatTensor(scoreedg)<line_sep>scorecor=torch.FloatTensor(scorecor)<line_sep>pc=torch.FloatTensor(pc)<line_sep>pc_vec=torch.FloatTensor(pc_vec)<line_sep>pc_theta=torch.FloatTensor([pc_theta])<line_sep>pc_height=torch.FloatTensor([pc_height])<line_sep>pc.requires_grad=<true><line_sep>pc_vec.requires_grad=<true><line_sep>pc_theta.requires_grad=<true><line_sep>pc_height.requires_grad=<true><line_sep>#print(pc_theta) #time.sleep(2) #return cor_id optimizer=optim.SGD([pc pc_vec pc_theta pc_height] lr=1e-3 momentum=0.9)<line_sep>best={'score':1e9}<for_stmt>i_step range(num_iters)<block_start>i=i_step<if>verbose<else><none><line_sep>optimizer.zero_grad()<line_sep>score=project2sphere_score(pc pc_vec pc_theta pc_height scoreedg scorecor i)<if_stmt>score.item()<l>best['score']<block_start>best['score']=score.item()<line_sep>best['pc']=pc.clone()<line_sep>best['pc_vec']=pc_vec.clone()<line_sep>best['pc_theta']=pc_theta.clone()<line_sep>best['pc_height']=pc_height.clone()<block_end>score.backward()<line_sep>optimizer.step()<block_end>pc=best['pc']<line_sep>pc_vec=best['pc_vec']<line_sep>pc_theta=best['pc_theta']<line_sep>pc_height=best['pc_height']<line_sep>opt_cor_id=pc2cor_id(pc pc_vec pc_theta pc_height).detach().numpy()<line_sep>split_num=int(opt_cor_id.shape[0]<floordiv>2)<line_sep>opt_cor_id=np.stack([opt_cor_id[:split_num] opt_cor_id[split_num:]] axis=1).reshape(split_num<times>2 2)<line_sep>#print(opt_cor_id) #print(cor_id) #time.sleep(500) <return>opt_cor_id<block_end>
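# map_coordinates above does bilinear interpolation with wrap-around
# index padding. A plain-NumPy mirror of that logic, handy as a sanity
# check for the torch version (a sketch, not part of the optimizer):
import numpy as np

def map_coordinates_np(inp, coordinates):
    h, w = inp.shape
    co_floor = np.floor(coordinates).astype(int)
    co_ceil = np.ceil(coordinates).astype(int)
    d1 = coordinates[1] - co_floor[1]        # fractional column offset
    d2 = coordinates[0] - co_floor[0]        # fractional row offset
    co_floor[0] %= h; co_floor[1] %= w       # wrap padding, as in the
    co_ceil[0] %= h; co_ceil[1] %= w         # torch implementation
    f00 = inp[co_floor[0], co_floor[1]]
    f10 = inp[co_floor[0], co_ceil[1]]
    f01 = inp[co_ceil[0], co_floor[1]]
    f11 = inp[co_ceil[0], co_ceil[1]]
    fx1 = f00 + d1 * (f10 - f00)
    fx2 = f01 + d1 * (f11 - f01)
    return fx1 + d2 * (fx2 - fx1)

# Integer coordinates return the pixel itself:
img = np.arange(12.0).reshape(3, 4)
assert map_coordinates_np(img, np.array([[1.0], [2.0]]))[0] == img[1, 2]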
# -*- coding: UTF-8 -*- <import_from_stmt>flask url_for g redirect<import_from_stmt>flask_login logout_user current_user<import_from_stmt>datetime datetime<import_from_stmt>importlib import_module<import_from_stmt>.. db login_manager<import_from_stmt>..models User<import_from_stmt>. auth<line_sep>@auth.route('/login/<string:authtype>')<def_stmt>login_authorize authtype<block_start>oauth=getattr(import_module('.'+authtype __package__) authtype)<line_sep><return>oauth.authorize(callback=url_for('auth.{}_authorized'.format(authtype) _external=<true>))<block_end>@auth.route('/logout')<def_stmt>logout <block_start>logout_user()<line_sep><return>redirect('/')<block_end>@auth.before_app_request<def_stmt>before_request <block_start>g.user=current_user<if_stmt>g.user.is_authenticated<block_start>g.user.last_seen=datetime.utcnow()<line_sep>db.session.add(g.user)<line_sep>db.session.commit()<block_end><block_end>@login_manager.user_loader<def_stmt>load_user user_id<block_start><return>User.query.get(int(user_id))<block_end>
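# login_authorize above resolves the provider module dynamically:
# import_module('.' + authtype, __package__) must find a sibling module
# (e.g. auth/github.py) exposing an OAuth client bound to the module's
# own name. A hypothetical provider stub, assuming a Flask-OAuthlib
# style client; the credentials are placeholders:
from flask_oauthlib.client import OAuth

_oauth = OAuth()
github = _oauth.remote_app(
    'github',
    consumer_key='PLACEHOLDER_KEY',
    consumer_secret='PLACEHOLDER_SECRET',
    request_token_params={'scope': 'user:email'},
    base_url='https://api.github.com/',
    request_token_url=None,
    access_token_url='https://github.com/login/oauth/access_token',
    authorize_url='https://github.com/login/oauth/authorize',
)
# /login/github would then call github.authorize(callback=...), and a
# matching auth.github_authorized view is expected to handle the reply.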
""" Protocols for matching putatively interacting sequences in protein complexes to create a concatenated sequence alignment Authors: <NAME> <NAME> """<import_from_stmt>collections Counter<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>evcouplings.couplings.mapping Segment<import_from_stmt>evcouplings.utils.config check_required InvalidParameterError <import_from_stmt>evcouplings.utils.system create_prefix_folders verify_resources <import_from_stmt>evcouplings.align.protocol modify_alignment<import_from_stmt>evcouplings.complex.alignment write_concatenated_alignment <import_from_stmt>evcouplings.complex.distance find_possible_partners best_reciprocal_matching plot_distance_distribution <import_from_stmt>evcouplings.complex.similarity read_species_annotation_table most_similar_by_organism filter_best_reciprocal find_paralogs <def_stmt>modify_complex_segments outcfg **kwargs<block_start>""" Modifies the output configuration so that the segments are correct for a concatenated alignment Parameters ---------- outcfg : dict The output configuration Returns ------- outcfg: dict The output configuration, with a new field called "segments" """<def_stmt>_modify_segments seg_list seg_prefix# extract segments from list representation into objects <block_start>segs=[Segment.from_list(s)<for>s seg_list]<line_sep># update segment IDs <for_stmt>i,s enumerate(segs start=1)<block_start>s.segment_id="{}_{}".format(seg_prefix i)<block_end><return>segs<block_end># merge segments - this allows to have more than one segment per # "monomer" alignment segments_1=_modify_segments(kwargs["first_segments"] "A")<line_sep>segments_2=_modify_segments(kwargs["second_segments"] "B")<line_sep>segments_complex=segments_1+segments_2<line_sep>outcfg["segments"]=[s.to_list()<for>s segments_complex]<line_sep><return>outcfg<block_end><def_stmt>_run_describe_concatenation outcfg **kwargs<block_start>""" calculate some basic statistics on the concatenated alignment """<line_sep>prefix=kwargs["prefix"]<line_sep>outcfg["concatentation_statistics_file"]=prefix+"_concatenation_statistics.csv"<line_sep>describe_concatenation(kwargs["first_annotation_file"] kwargs["second_annotation_file"] kwargs["first_genome_location_file"] kwargs["second_genome_location_file"] outcfg["concatentation_statistics_file"])<line_sep><return>outcfg<block_end><def_stmt>describe_concatenation annotation_file_1 annotation_file_2 genome_location_filename_1 genome_location_filename_2 outfile<block_start>""" Describes properties of concatenated alignment. 
Writes a csv with the following columns num_seqs_1 : number of sequences in the first monomer alignment num_seqs_2 : number of sequences in the second monomer alignment num_nonred_species_1 : number of unique species annotations in the first monomer alignment num_nonred_species_2 : number of unique species annotations in the second monomer alignment num_species_overlap: number of unique species found in both alignments median_num_per_species_1 : median number of paralogs per species in the first monomer alignment median_num_per_species_2 : median number of paralogs per species in the second monomer alignment num_with_embl_cds_1 : number of IDs for which we found an EMBL CDS in the first monomer alignment (relevant to distance concatenation only) num_with_embl_cds_2 : number of IDs for which we found an EMBL CDS in the second monomer alignment (relevant to distance concatenation only) Parameters ---------- annotation_file_1 : str Path to annotation.csv file for first monomer alignment annotation_file_2 : str Path to annotation.csv file for second monomer alignment genome_location_filename_1 : str Path to genome location mapping file for first alignment genome_location_filename_2 : str Path to genome location mapping file for second alignment outfile: str Path to output file """<line_sep># load the annotations for each alignment # as a pd.DataFrame annotations_1=read_species_annotation_table(annotation_file_1)<line_sep>species_1=annotations_1.species.values<line_sep>annotations_2=read_species_annotation_table(annotation_file_2)<line_sep>species_2=annotations_2.species.values<line_sep># calculate the number of sequences found in each alignment num_seqs_1=len(annotations_1)<line_sep>num_seqs_2=len(annotations_2)<line_sep># calculate the number of species found in each alignment # where a species is defined as a unique OS or Tax annotation field nonredundant_annotations_1=len(set(species_1))<line_sep>nonredundant_annotations_2=len(set(species_2))<line_sep># calculate the number of overlapping species species_overlap=list(set(species_1).intersection(set(species_2)))<line_sep>n_species_overlap=len(species_overlap)<line_sep># calculate the median number of paralogs per species n_paralogs_1=float(# counts the number of times each species occurs in the list # then takes the median np.median(list(Counter(species_1).values())))<line_sep>n_paralogs_2=float(np.median(list(Counter(species_2).values())))<line_sep># If the user provided genome location files, calculate the number # of ids for which we found an embl CDS.
Default value is np.nan embl_cds1=np.nan<line_sep>embl_cds2=np.nan<if_stmt>(genome_location_filename_1<is><not><none><and>genome_location_filename_2<is><not><none>)<block_start>genome_location_table_1=pd.read_csv(genome_location_filename_1)<line_sep>genome_location_table_2=pd.read_csv(genome_location_filename_2)<line_sep># Number uniprot IDs with EMBL CDS that is not NA <if_stmt>"uniprot_ac"<in>genome_location_table_1.columns<block_start>embl_cds1=len(list(set(genome_location_table_1.uniprot_ac)))<block_end><if_stmt>"uniprot_ac"<in>genome_location_table_2.columns<block_start>embl_cds2=len(list(set(genome_location_table_2.uniprot_ac)))<block_end><block_end>concatenation_data=[num_seqs_1 num_seqs_2 nonredundant_annotations_1 nonredundant_annotations_2 n_species_overlap n_paralogs_1 n_paralogs_2 embl_cds1 embl_cds2 ]<line_sep>cols=["num_seqs_1" "num_seqs_2" "num_nonred_species_1" "num_nonred_species_2" "num_species_overlap" "median_num_per_species_1" "median_num_per_species_2" "num_with_embl_cds_1" "num_with_embl_cds_2" ]<line_sep># create dataframe and store data_df=pd.DataFrame([concatenation_data] columns=cols)<line_sep>data_df.to_csv(outfile)<block_end><def_stmt>genome_distance **kwargs<block_start>""" Protocol: Concatenate alignments based on genomic distance Parameters ---------- Mandatory kwargs arguments: See list below in code where calling check_required Returns ------- outcfg : dict Output configuration of the pipeline, including the following fields: * alignment_file * raw_alignment_file * focus_mode * focus_sequence * segments * frequencies_file * identities_file * num_sequences * num_sites * raw_focus_alignment_file * statistics_file """<line_sep>check_required(kwargs ["prefix" "first_alignment_file" "second_alignment_file" "first_focus_sequence" "second_focus_sequence" "first_focus_mode" "second_focus_mode" "first_region_start" "second_region_start" "first_segments" "second_segments" "genome_distance_threshold" "first_genome_location_file" "second_genome_location_file" "first_annotation_file" "second_annotation_file"])<line_sep>prefix=kwargs["prefix"]<line_sep># make sure input alignments exist verify_resources("Input alignment does not exist" kwargs["first_alignment_file"] kwargs["second_alignment_file"])<line_sep>verify_resources("Genome location file does not exist" kwargs["first_genome_location_file"] kwargs["second_genome_location_file"])<line_sep># make sure output directory exists create_prefix_folders(prefix)<line_sep># load the information for each monomer alignment alignment_1=kwargs["first_alignment_file"]<line_sep>alignment_2=kwargs["second_alignment_file"]<line_sep>genome_location_filename_1=kwargs["first_genome_location_file"]<line_sep>genome_location_filename_2=kwargs["second_genome_location_file"]<line_sep>gene_location_table_1=pd.read_csv(genome_location_filename_1 header=0)<line_sep>gene_location_table_2=pd.read_csv(genome_location_filename_2 header=0)<line_sep># find all possible matches possible_partners=find_possible_partners(gene_location_table_1 gene_location_table_2)<line_sep># find the best reciprocal matches id_pairing_unfiltered=best_reciprocal_matching(possible_partners)<line_sep># filter best reciprocal matches by genome distance threshold <if_stmt>kwargs["genome_distance_threshold"]<block_start>distance_threshold=kwargs["genome_distance_threshold"]<line_sep>id_pairing=id_pairing_unfiltered.query("distance < @distance_threshold")<block_end><else_stmt><block_start>id_pairing=id_pairing_unfiltered<block_end>id_pairing.loc[: "id_1"]=id_pairing.loc[: 
"uniprot_id_1"]<line_sep>id_pairing.loc[: "id_2"]=id_pairing.loc[: "uniprot_id_2"]<line_sep># write concatenated alignment with distance filtering # TODO: save monomer alignments? target_seq_id,target_seq_index,raw_ali,mon_ali_1,mon_ali_2=write_concatenated_alignment(id_pairing alignment_1 alignment_2 kwargs["first_focus_sequence"] kwargs["second_focus_sequence"])<line_sep># save the alignment files raw_alignment_file=prefix+"_raw.fasta"<with_stmt>open(raw_alignment_file "w")<as>of<block_start>raw_ali.write(of)<block_end>mon_alignment_file_1=prefix+"_monomer_1.fasta"<with_stmt>open(mon_alignment_file_1 "w")<as>of<block_start>mon_ali_1.write(of)<block_end>mon_alignment_file_2=prefix+"_monomer_2.fasta"<with_stmt>open(mon_alignment_file_2 "w")<as>of<block_start>mon_ali_2.write(of)<block_end># filter the alignment aln_outcfg,_=modify_alignment(raw_ali target_seq_index target_seq_id kwargs["first_region_start"] **kwargs)<line_sep># make sure we return all the necessary information: # * alignment_file: final concatenated alignment that will go into plmc # * focus_sequence: this is the identifier of the concatenated target # sequence which will be passed into plmc with -f outcfg=aln_outcfg<line_sep>outcfg["raw_alignment_file"]=raw_alignment_file<line_sep>outcfg["first_concatenated_monomer_alignment_file"]=mon_alignment_file_1<line_sep>outcfg["second_concatenated_monomer_alignment_file"]=mon_alignment_file_2<line_sep>outcfg["focus_sequence"]=target_seq_id<line_sep># Update the segments outcfg=modify_complex_segments(outcfg **kwargs)<line_sep># Describe the statistics of the concatenation outcfg=_run_describe_concatenation(outcfg **kwargs)<line_sep># plot the genome distance distribution outcfg["distance_plot_file"]=prefix+"_distplot.pdf"<line_sep>plot_distance_distribution(id_pairing_unfiltered outcfg["distance_plot_file"])<line_sep><return>outcfg<block_end><def_stmt>best_hit **kwargs<block_start>""" Protocol: Concatenate alignments based on the best hit to the focus sequence in each species Parameters ---------- Mandatory kwargs arguments: See list below in code where calling check_required Returns ------- outcfg : dict Output configuration of the pipeline, including the following fields: alignment_file raw_alignment_file focus_mode focus_sequence segments frequencies_file identities_file num_sequences num_sites raw_focus_alignment_file statistics_file """<line_sep>check_required(kwargs ["prefix" "first_alignment_file" "second_alignment_file" "first_focus_sequence" "second_focus_sequence" "first_focus_mode" "second_focus_mode" "first_segments" "second_segments" "first_identities_file" "second_identities_file" "first_annotation_file" "second_annotation_file" "use_best_reciprocal" "paralog_identity_threshold"])<line_sep>prefix=kwargs["prefix"]<line_sep># make sure input alignments verify_resources("Input alignment does not exist" kwargs["first_alignment_file"] kwargs["second_alignment_file"])<line_sep># make sure output directory exists create_prefix_folders(prefix)<def_stmt>_load_monomer_info annotations_file identities_file target_sequence alignment_file use_best_reciprocal identity_threshold# read in annotation to a file and rename the appropriate column <block_start>annotation_table=read_species_annotation_table(annotations_file)<line_sep># read identity file similarities=pd.read_csv(identities_file)<line_sep># create a pd.DataFrame containing the best hit in each organism most_similar_in_species=most_similar_by_organism(similarities 
annotation_table)<if_stmt>use_best_reciprocal<block_start>paralogs=find_paralogs(target_sequence annotation_table similarities identity_threshold)<line_sep>most_similar_in_species=filter_best_reciprocal(alignment_file paralogs most_similar_in_species)<block_end><return>most_similar_in_species<block_end># load the information about each monomer alignment most_similar_in_species_1=_load_monomer_info(kwargs["first_annotation_file"] kwargs["first_identities_file"] kwargs["first_focus_sequence"] kwargs["first_alignment_file"] kwargs["use_best_reciprocal"] kwargs["paralog_identity_threshold"])<line_sep>most_similar_in_species_2=_load_monomer_info(kwargs["second_annotation_file"] kwargs["second_identities_file"] kwargs["second_focus_sequence"] kwargs["second_alignment_file"] kwargs["use_best_reciprocal"] kwargs["paralog_identity_threshold"])<line_sep># merge the two dataframes to get all species found in # both alignments species_intersection=most_similar_in_species_1.merge(most_similar_in_species_2 how="inner" # takes the intersection on="species" # merges on species identifiers suffixes=("_1" "_2"))<line_sep># write concatenated alignment with distance filtering # TODO: save monomer alignments? target_seq_id,target_seq_index,raw_ali,mon_ali_1,mon_ali_2=write_concatenated_alignment(species_intersection kwargs["first_alignment_file"] kwargs["second_alignment_file"] kwargs["first_focus_sequence"] kwargs["second_focus_sequence"])<line_sep># save the alignment files raw_alignment_file=prefix+"_raw.fasta"<with_stmt>open(raw_alignment_file "w")<as>of<block_start>raw_ali.write(of)<block_end>mon_alignment_file_1=prefix+"_monomer_1.fasta"<with_stmt>open(mon_alignment_file_1 "w")<as>of<block_start>mon_ali_1.write(of)<block_end>mon_alignment_file_2=prefix+"_monomer_2.fasta"<with_stmt>open(mon_alignment_file_2 "w")<as>of<block_start>mon_ali_2.write(of)<block_end>aln_outcfg,_=modify_alignment(raw_ali target_seq_index target_seq_id kwargs["first_region_start"] **kwargs)<line_sep># make sure we return all the necessary information: # * alignment_file: final concatenated alignment that will go into plmc # * focus_sequence: this is the identifier of the concatenated target # sequence which will be passed into plmc with -f outcfg=aln_outcfg<line_sep>outcfg["raw_alignment_file"]=raw_alignment_file<line_sep>outcfg["first_concatenated_monomer_alignment_file"]=mon_alignment_file_1<line_sep>outcfg["second_concatenated_monomer_alignment_file"]=mon_alignment_file_2<line_sep>outcfg["focus_sequence"]=target_seq_id<line_sep># Update the segments outcfg=modify_complex_segments(outcfg **kwargs)<line_sep># Describe the statistics of the concatenation outcfg=_run_describe_concatenation(outcfg **kwargs)<line_sep><return>outcfg<block_end># list of available EC inference protocols PROTOCOLS={# concatenate based on genomic distance ("operon-based") "genome_distance":genome_distance # concatenate based on best hit per genome ("species") "best_hit":best_hit}<def_stmt>run **kwargs<block_start>""" Run alignment concatenation protocol Parameters ---------- Mandatory kwargs arguments: protocol: concatenation protocol to run prefix: Output prefix for all generated files Returns ------- outcfg : dict Output configuration of concatenation stage Dictionary with results in following fields: (in brackets: not mandatory) alignment_file raw_alignment_file focus_mode focus_sequence segments frequencies_file identities_file num_sequences num_sites raw_focus_alignment_file statistics_file """<line_sep>check_required(kwargs 
["protocol"])<if_stmt>kwargs["protocol"]<not><in>PROTOCOLS<block_start><raise>InvalidParameterError("Invalid protocol selection: "+"{}. Valid protocols are: {}".format(kwargs["protocol"] ", ".join(PROTOCOLS.keys())))<block_end><return>PROTOCOLS[kwargs["protocol"]](**kwargs)<block_end>
<import_stmt>networkx<as>nx<import_from_stmt>.sankey_definition Ordering<class_stmt>LayeredMixin(object)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.ordering=Ordering([])<block_end><def_stmt>copy self<block_start>new=super().copy()<line_sep>new.ordering=self.ordering<line_sep><return>new<block_end><def_stmt>remove_node self u<block_start>super().remove_node(u)<line_sep>self.ordering=self.ordering.remove(u)<block_end><def_stmt>get_node self u<block_start>"""Get the ProcessGroup or Waypoint associated with `u`"""<line_sep><return>self.nodes[u]['node']<block_end><block_end><class_stmt>LayeredGraph(LayeredMixin nx.DiGraph)<block_start><pass><block_end><class_stmt>MultiLayeredGraph(LayeredMixin nx.MultiDiGraph)<block_start><pass><block_end>
# -*- coding: utf-8 -*- <import_stmt>scrapy<import_stmt>json<class_stmt>ZhihuSpider(scrapy.Spider)<block_start>name='zhihu'<line_sep>allowed_domains=['www.zhihu.com']<line_sep>start_urls=['https://www.zhihu.com/']<line_sep>loginUrl='https://www.zhihu.com/#signin'<line_sep>siginUrl='https://www.zhihu.com/login/email'<line_sep>feedUrl='https://www.zhihu.com/api/v3/feed/topstory'<line_sep>nextFeedUrl=''<line_sep>curFeedId=0<line_sep>custom_settings={"COOKIES_ENABLED":<true> }<line_sep>headers={'Host':'www.zhihu.com' 'Connection':'keep-alive' 'Origin':'https://www.zhihu.com' 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36' 'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8' 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' 'X-Requested-With':'XMLHttpRequest' 'DNT':1 'Referer':'https://www.zhihu.com/' 'Accept-Encoding':'gzip, deflate, br' 'Accept-Language':'zh-CN,zh;q=0.8,en;q=0.6' 'Upgrade-Insecure-Requests:':1 }<line_sep>cookies={'d_c0':'"AHCAtu1iqAmPTped76X1ZdN0X_qAwhjdLUU=|1458699045"' '__utma':'51854390.1407411155.1458699046.1458699046.1458699046.1' '__utmv':'51854390.000--|3=entry_date=20160322=1' '_zap':'850897bb-cba4-4d0b-8653-fd65e7578ac2' 'q_c1':'b7918ff9a5514d2981c30050c8c732e1|1502937247000|1491446589000' 'aliyungf_tc':'AQAAACtKLW+lywEAOhSntJwFFTilwpwt' '_xsrf':'f3ab08fc68489f44ae77236555367c70' 'r_cap_id':'"M2NjNDAwNTZmY2ExNDA3NzgzNjZkZDA1ODNjZWJkNjI=|1503458111|36984ab33f21997b742d97ace2e02043cbb0a76e"' 'cap_id':'"ZTIxMmM5Yzg1MGJkNDcxNjgxYzZjMjNlYTg3OGE0Yzk=|1503457914|8dce8550bca28e427771a0e7e1fe1bafb6e170f6"' }<def_stmt>start_requests self<block_start><return>[scrapy.http.FormRequest(self.loginUrl headers=self.headers cookies=self.cookies meta={'cookiejar':1} callback=self.post_login)]<block_end><def_stmt>post_login self response<block_start>xsrf=response.css('div.view-signin > form > input[name=_xsrf]::attr(value)').extract_first()<line_sep>self.headers['X-Xsrftoken']=xsrf<line_sep><return>[scrapy.http.FormRequest(self.siginUrl method='POST' headers=self.headers meta={'cookiejar':response.meta['cookiejar']} formdata={'_xsrf':xsrf 'captcha_type':'cn' 'email':'<EMAIL>' 'password':'<PASSWORD>' } callback=self.after_login)]<block_end><def_stmt>after_login self response<block_start>jdict=json.loads(response.body)<line_sep>print('after_login' jdict)<if_stmt>jdict['r']<eq>0<block_start>z_c0=response.headers.getlist('Set-Cookie')[2].split(';')[0].split('=')[1]<line_sep>self.headers['authorization']='Bearer '+z_c0<line_sep><return>scrapy.http.FormRequest(url=self.feedUrl method='GET' meta={'cookiejar':response.meta['cookiejar']} headers=self.headers formdata={'action_feed':'True' 'limit':'10' 'action':'down' 'after_id':str(self.curFeedId) 'desktop':'true'} callback=self.parse)<block_end><else_stmt><block_start>print(jdict['error'])<block_end><block_end><def_stmt>parse self response<block_start><with_stmt>open('zhihu.json' 'a')<as>fd<block_start>fd.write(response.body)<block_end>jdict=json.loads(response.body)<line_sep>jdatas=jdict['data']<for_stmt>entry jdatas<block_start>entry['pid']=entry['id']<line_sep><yield>entry<block_end>jpaging=jdict['paging']<line_sep>self.curFeedId<augadd>len(jdatas)<if_stmt>jpaging['is_end']<eq><false><and>self.curFeedId<l>50<block_start>self.nextFeedUrl=jpaging['next']<line_sep><yield>self.next_request(response)<block_end><block_end><def_stmt>next_request self 
response<block_start><return>scrapy.http.FormRequest(url=self.nextFeedUrl method='GET' meta={'cookiejar':response.meta['cookiejar']} headers=self.headers callback=self.parse)<block_end><block_end>
<import_stmt>logging<line_sep>__all__=["SoftwareMFMDecoder"]<class_stmt>SoftwareMFMDecoder<block_start><def_stmt>__init__ self logger<block_start>self._logger=logger<line_sep>self._lock_time=0<line_sep>self._bit_time=0<block_end><def_stmt>_log self message *args<block_start>self._logger.log(logging.DEBUG "soft-MFM: "+message *args)<block_end><def_stmt>edges self bytestream<block_start>edge_len=0<for_stmt>byte bytestream<block_start>edge_len<augadd>1+byte<if_stmt>byte<eq>0xfd<block_start><continue><block_end><yield>edge_len<line_sep>edge_len=0<block_end><block_end><def_stmt>bits self bytestream<block_start>prev_byte=0<for_stmt>curr_byte bytestream<block_start><if_stmt>prev_byte<ne>0xfd<block_start><yield>1<block_end><for_stmt>_ range(curr_byte)<block_start><yield>0<block_end>prev_byte=curr_byte<block_end><block_end><def_stmt>domains self bitstream<block_start>polarity=1<for_stmt>has_edge bitstream<block_start><if_stmt>has_edge<block_start>polarity<augmul>-1<block_end><yield>polarity<block_end><block_end><def_stmt>lock self bitstream * debug=<false> nco_init_period=0 nco_min_period=16 nco_max_period=256 nco_frac_bits=8 pll_kp_exp=2 pll_gph_exp=1<block_start>nco_period=nco_init_period<lshift>nco_frac_bits<line_sep>nco_phase=0<line_sep>nco_step=1<lshift>nco_frac_bits<line_sep>nco_clock=0<line_sep>pll_error=0<line_sep>pll_feedbk=0<line_sep>bit_curr=0<for_stmt>has_edge bitstream<block_start><if_stmt>nco_period<l>nco_min_period<lshift>nco_frac_bits<block_start>nco_period=nco_min_period<lshift>nco_frac_bits<block_end><if_stmt>nco_period<ge>nco_max_period<lshift>nco_frac_bits<block_start>nco_period=nco_max_period<lshift>nco_frac_bits<block_end><if_stmt>has_edge<block_start>bit_curr=1<line_sep>pll_error=nco_phase-(nco_period<rshift>1)<line_sep>pll_p_term=abs(pll_error)<rshift>pll_kp_exp<line_sep>pll_gain=max(1<lshift>pll_gph_exp pll_p_term)<if_stmt>pll_error<l>0<block_start>pll_feedbk=+1<times>pll_gain<block_end><else_stmt><block_start>pll_feedbk=-1<times>pll_gain<block_end><block_end><if_stmt>nco_phase<ge>nco_period<block_start>nco_phase=0<if_stmt><not>debug<block_start><yield>bit_curr<block_end>bit_curr=0<block_end><else_stmt><block_start>nco_phase<augadd>nco_step+pll_feedbk<line_sep>nco_period<augsub>pll_feedbk<rshift>pll_gph_exp<line_sep>pll_feedbk=0<block_end><if_stmt>debug<block_start><yield>(nco_phase/nco_step nco_period/nco_step pll_error/nco_step)<block_end><block_end><block_end><def_stmt>demodulate self chipstream<block_start>shreg=[]<line_sep>offset=0<line_sep>synced=<false><line_sep>prev=0<line_sep>bits=[]<while_stmt><true><block_start><while_stmt>len(shreg)<l>64<block_start><try_stmt><block_start>shreg.append(next(chipstream))<block_end><except_stmt>StopIteration<block_start><return><block_end><block_end>synced_now=<false><for_stmt>sync_offset (0 1)<block_start><if_stmt>shreg[sync_offset:sync_offset+16]<eq>[0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 1]<block_start><if_stmt><not>synced<or>sync_offset<ne>0<block_start>self._log("sync=K.A1 chip-off=%d" offset+sync_offset)<block_end>offset<augadd>sync_offset+16<line_sep>shreg=shreg[sync_offset+16:]<line_sep>synced=<true><line_sep>prev=1<line_sep>bits=[]<line_sep><yield>(1 0xA1)<line_sep>synced_now=<true><block_end><if_stmt>synced_now<block_start><break><block_end><block_end><if_stmt>synced_now<block_start><continue><block_end><elif_stmt><not>synced<and>len(shreg)<ge>1<block_start>offset<augadd>1<line_sep>shreg=shreg[1:]<block_end><if_stmt>synced<and>len(shreg)<ge>2<block_start><if_stmt>shreg[0:2]<eq>[0 
1]<block_start>curr=1<block_end><elif_stmt>prev<eq>1<and>shreg[0:2]<eq>[0 0]<block_start>curr=0<block_end><elif_stmt>prev<eq>0<and>shreg[0:2]<eq>[1 0]<block_start>curr=0<block_end><else_stmt><block_start>synced=<false><line_sep>self._log("desync chip-off=%d bitno=%d prev=%d cell=%d%d" offset len(bits) prev *shreg[0:2])<block_end><if_stmt>synced<block_start>offset<augadd>2<line_sep>shreg=shreg[2:]<line_sep>prev=curr<line_sep>bits.append(curr)<if_stmt>len(bits)<eq>8<block_start><yield>(0 sum(bit<lshift>(7-n)<for>n,bit enumerate(bits)))<line_sep>bits=[]<block_end><block_end><block_end><block_end><block_end><block_end>
<import_stmt>_plotly_utils.basevalidators<class_stmt>ScattermapboxValidator(_plotly_utils.basevalidators.CompoundArrayValidator)<block_start><def_stmt>__init__ self plotly_name="scattermapbox" parent_name="layout.template.data" **kwargs<block_start>super(ScattermapboxValidator self).__init__(plotly_name=plotly_name parent_name=parent_name data_class_str=kwargs.pop("data_class_str" "Scattermapbox") data_docs=kwargs.pop("data_docs" """ """ ) **kwargs)<block_end><block_end>
<import_stmt>copy<import_stmt>errno<import_stmt>mock<import_stmt>os<import_from_stmt>. TEST_CONF_DIR<import_from_stmt>django.conf settings<try_stmt><block_start><import_from_stmt>django.urls reverse<block_end><except_stmt>ImportError# Django < 1.10 <block_start><import_from_stmt>django.core.urlresolvers reverse<block_end><import_from_stmt>django.http HttpResponse<import_from_stmt>.base TestCase<import_from_stmt>django.test.utils override_settings<import_from_stmt>graphite.util json<try_stmt><block_start><import_from_stmt>django.contrib.auth get_user_model<line_sep>User=get_user_model()<block_end><except_stmt>ImportError<block_start><import_from_stmt>django.contrib.auth.models User<block_end><class_stmt>DashboardTest(TestCase)# Set config to the test config file <block_start>settings.DASHBOARD_CONF=os.path.join(TEST_CONF_DIR 'dashboard.conf')<line_sep># Define a testtemplate testtemplate={"state":'{"graphs": [[ "target=a.b.c.*.__VALUE__.d", { "from":"-2days", "target":[ "a.b.c.*.__VALUE__.d" ], "until":"now" }, "/render?width=400&from=-2days&until=now&height=250&target=a.b.c.*.__VALUE__.d&_uniq=0.6526056618895382&title=a.b.c.*.__VALUE__.d" ]]}'}<line_sep>@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR 'dashboard.conf.missing'))<def_stmt>test_dashboard_missing_conf self<block_start>url=reverse('dashboard')<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end>@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR 'dashboard.conf.missing'))<def_stmt>test_dashboard_template_missing_template self<block_start>url=reverse('dashboard_template' args=['bogustemplate' 'testkey'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end>@mock.patch('graphite.dashboard.views.DashboardConfig.check')<def_stmt>test_dashboard_conf_read_failure self check<block_start>check.side_effect=OSError(errno.EPERM 'Operation not permitted')<line_sep>url=reverse('dashboard')<with_stmt>self.assertRaises(Exception)<block_start>_=self.client.get(url)<block_end><block_end>@mock.patch('graphite.dashboard.views.DashboardConfig.check')<def_stmt>test_dashboard_template_conf_read_failure self check<block_start>check.side_effect=OSError(errno.EPERM 'Operation not permitted')<line_sep>url=reverse('dashboard_template' args=['bogustemplate' 'testkey'])<with_stmt>self.assertRaises(Exception)<block_start>_=self.client.get(url)<block_end><block_end>@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR 'dashboard.conf.missing_ui'))<def_stmt>test_dashboard_conf_missing_ui self<block_start>url=reverse('dashboard')<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end>@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR 'dashboard.conf.missing_ui'))<def_stmt>test_dashboard_template_missing_ui self<block_start>url=reverse('dashboard_template' args=['bogustemplate' 'testkey'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end>@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR 'dashboard.conf.missing_keyboard-shortcuts'))<def_stmt>test_dashboard_conf_missing_keyboard_shortcuts self<block_start>url=reverse('dashboard')<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end>@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR 'dashboard.conf.missing_keyboard-shortcuts'))<def_stmt>test_dashboard_template_missing_keyboard_shortcuts 
self<block_start>url=reverse('dashboard_template' args=['bogustemplate' 'testkey'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end>@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR 'dashboard.conf.invalid_theme'))<def_stmt>test_dashboard_conf_invalid_theme self<block_start>url=reverse('dashboard')<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end>@override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR 'dashboard.conf.invalid_theme'))<def_stmt>test_dashboard_template_invalid_theme self<block_start>url=reverse('dashboard_template' args=['bogustemplate' 'testkey'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_dashboard self<block_start>url=reverse('dashboard')<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_dashboard_no_user self<block_start>url=reverse('dashboard')<line_sep>request={"user":'' "state":'{}'}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_dashboard_pass_valid self<block_start>url=reverse('dashboard_save' args=['testdashboard'])<line_sep>request={"state":'{}'}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>url=reverse('dashboard' args=['testdashboard'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_dashboard_pass_invalid_name self<block_start>url=reverse('dashboard' args=['bogusdashboard'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_dashboard_find_empty self<block_start>url=reverse('dashboard_find')<line_sep>request={"query":""}<line_sep>response=self.client.get(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"dashboards": []}')<block_end><def_stmt>test_dashboard_save_empty self<block_start>url=reverse('dashboard_save' args=['testdashboard'])<line_sep>request={"state":'{}'}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_dashboard_save_overwrite self<block_start>url=reverse('dashboard_save' args=['testdashboard'])<line_sep>request={"state":'{}'}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_dashboard_find_existing self<block_start>url=reverse('dashboard_save' args=['testdashboard'])<line_sep>request={"state":'{}'}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>url=reverse('dashboard_find')<line_sep>request={"query":"test"}<line_sep>response=self.client.get(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"dashboards": [{"name": "testdashboard"}]}')<block_end><def_stmt>test_dashboard_find_not_existing self<block_start>url=reverse('dashboard_save' args=['testdashboard'])<line_sep>request={"state":'{}'}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>url=reverse('dashboard_find')<line_sep>request={"query":"not here"}<line_sep>response=self.client.get(url 
request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"dashboards": []}')<block_end><def_stmt>test_dashboard_load_not_existing self<block_start>url=reverse('dashboard_load' args=['bogusdashboard'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"error": "Dashboard \'bogusdashboard\' does not exist. "}')<block_end><def_stmt>test_dashboard_load_existing self<block_start>url=reverse('dashboard_save' args=['testdashboard'])<line_sep>request={"state":'{}'}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>url=reverse('dashboard_load' args=['testdashboard'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"state": {}}')<block_end><def_stmt>test_dashboard_delete_nonexisting self<block_start>url=reverse('dashboard_delete' args=['bogusdashboard'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"error": "Dashboard \'bogusdashboard\' does not exist. "}')<block_end><def_stmt>test_dashboard_delete_existing self# Create a dashboard entry <block_start>url=reverse('dashboard_save' args=['testdashboard'])<line_sep>request={"state":'{}'}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep># Delete it url=reverse('dashboard_delete' args=['testdashboard'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"success": true}')<line_sep># Confirm it was deleted url=reverse('dashboard_find')<line_sep>request={"query":""}<line_sep>response=self.client.get(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"dashboards": []}')<block_end><def_stmt>test_dashboard_create_temporary self<block_start>url=reverse('dashboard_create_temporary')<line_sep>request={"state":'{}'}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"name": "temporary-0"}')<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"name": "temporary-1"}')<line_sep>url=reverse('dashboard_find')<line_sep>request={"query":""}<line_sep>response=self.client.get(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"dashboards": []}')<block_end><def_stmt>test_dashboard_template_pass_invalid self<block_start>url=reverse('dashboard_template' args=['bogustemplate' 'testkey'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_dashboard_template_pass_valid self<block_start>url=reverse('dashboard_save_template' args=['testtemplate' 'testkey'])<line_sep>request=copy.deepcopy(self.testtemplate)<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>url=reverse('dashboard_template' args=['testtemplate' 'testkey'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_dashboard_find_template_empty 
self<block_start>url=reverse('dashboard_find_template')<line_sep>request={"query":""}<line_sep>response=self.client.get(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"templates": []}')<block_end><def_stmt>test_dashboard_save_template self<block_start>url=reverse('dashboard_save_template' args=['testtemplate' 'testkey'])<line_sep>request=copy.deepcopy(self.testtemplate)<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep># Save again after it now exists <block_end><def_stmt>test_dashboard_save_template_overwrite self<block_start>url=reverse('dashboard_save_template' args=['testtemplate' 'testkey'])<line_sep>request=copy.deepcopy(self.testtemplate)<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>url=reverse('dashboard_save_template' args=['testtemplate' 'testkey'])<line_sep>request=copy.deepcopy(self.testtemplate)<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_dashboard_find_template self<block_start>url=reverse('dashboard_save_template' args=['testtemplate' 'testkey'])<line_sep>request=copy.deepcopy(self.testtemplate)<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>url=reverse('dashboard_find_template')<line_sep>request={"query":"test"}<line_sep>response=self.client.get(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"templates": [{"name": "testtemplate"}]}')<block_end><def_stmt>test_dashboard_find_template_nonexistent self<block_start>url=reverse('dashboard_save_template' args=['testtemplate' 'testkey'])<line_sep>request=copy.deepcopy(self.testtemplate)<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>url=reverse('dashboard_find_template')<line_sep>request={"query":"not here"}<line_sep>response=self.client.get(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"templates": []}')<block_end><def_stmt>test_dashboard_load_template_nonexistent self<block_start>url=reverse('dashboard_save_template' args=['testtemplate' 'testkey'])<line_sep>request=copy.deepcopy(self.testtemplate)<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>url=reverse('dashboard_load_template' args=['bogustemplate' 'testkey'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"error": "Template \'bogustemplate\' does not exist. 
"}')<block_end><def_stmt>test_dashboard_load_template_existing self<block_start>url=reverse('dashboard_save_template' args=['testtemplate' 'testkey'])<line_sep>request=copy.deepcopy(self.testtemplate)<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>url=reverse('dashboard_load_template' args=['testtemplate' 'testkey'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<line_sep>data=json.loads(response.content)<line_sep>graph_data=json.loads(self.testtemplate["state"].replace('__VALUE__' 'testkey'))<line_sep>self.assertEqual(data json.loads('{"state": {"name": "testtemplate/testkey", "graphs": '+json.dumps(graph_data['graphs'])+'}}'))<block_end><def_stmt>test_dashboard_delete_template_nonexisting self# Delete nonexistent template <block_start>url=reverse('dashboard_delete_template' args=['bogustemplate'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"error": "Template \'bogustemplate\' does not exist. "}')<block_end><def_stmt>test_dashboard_delete_template_existing self<block_start>url=reverse('dashboard_save_template' args=['testtemplate' 'testkey'])<line_sep>request=copy.deepcopy(self.testtemplate)<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>url=reverse('dashboard_delete_template' args=['testtemplate'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"success": true}')<line_sep>url=reverse('dashboard_find_template')<line_sep>request={"query":""}<line_sep>response=self.client.get(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"templates": []}')<block_end><def_stmt>test_dashboard_help self<block_start>url=reverse('dashboard_help')<line_sep>request={}<line_sep>response=self.client.get(url request)<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_dashboard_email self<block_start>url=reverse('dashboard_email')<line_sep>request={"sender":"<EMAIL>" "recipients":"noreply@localhost" "subject":"Test email" "message":"Here is the test graph" "graph_params":'{"target":["sumSeries(a.b.c.d)"],"title":"Test","width":"500","from":"-55minutes","until":"now","height":"400"}'}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.content b'{"success": true}')<block_end>@mock.patch('graphite.dashboard.views.renderView')<def_stmt>test_dashboard_email_mock_renderView self rv<block_start>url=reverse('dashboard_email')<line_sep>request={"sender":"nore<EMAIL>" "recipients":"nore<EMAIL>" "subject":"Test email" "message":"Here is the test graph" "graph_params":'{"target":["sumSeries(a.b.c.d)"],"title":"Test","width":"500","from":"-55minutes","until":"now","height":"400"}'}<line_sep>responseObject=HttpResponse()<line_sep>responseObject.content=''<line_sep>rv.return_value=responseObject<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.content b'{"success": true}')<block_end><def_stmt>test_dashboard_login_invalid_authenticate self<block_start>url=reverse('dashboard_login')<line_sep>request={"username":"testuser" "password":"<PASSWORD>"}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(json.loads(response.content) json.loads('{"errors": {"reason": 
"Username and/or password invalid."}, "success": false, "text": {}, "permissions": []}'))<block_end>@mock.patch('graphite.dashboard.views.authenticate')<def_stmt>test_dashboard_login_valid_authenticate self authenticate<block_start>url=reverse('dashboard_login')<line_sep>request={"username":"testuser" "password":"<PASSWORD>"}<line_sep>user=User.objects.create(email='<EMAIL>')<line_sep>user.backend=''<line_sep>authenticate.return_value=user<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(json.loads(response.content) json.loads('{"permissions": ["change", "delete"], "success": true, "text": {}, "errors": {}}'))<block_end>@mock.patch('graphite.dashboard.views.authenticate')<def_stmt>test_dashboard_login_valid_authenticate_not_active self authenticate<block_start>url=reverse('dashboard_login')<line_sep>request={"username":"testuser" "password":"<PASSWORD>"}<line_sep>user=User.objects.create(email='<EMAIL>')<line_sep>user.backend=''<line_sep>user.is_active=<false><line_sep>authenticate.return_value=user<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(json.loads(response.content) json.loads('{"permissions": [], "success": false, "errors": {"reason": "Account disabled."}, "text": {}}'))<block_end><def_stmt>test_dashboard_logout self<block_start>url=reverse('dashboard_logout')<line_sep>request={"username":"testuser"}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(json.loads(response.content) json.loads('{"errors": {}, "success": true, "text": {}}'))<block_end>@mock.patch('graphite.dashboard.views.getPermissions')<def_stmt>test_dashboard_save_no_permissions self gp<block_start>gp.return_value=[<none>]<line_sep>url=reverse('dashboard_save' args=['testdashboard'])<line_sep>request={"state":'{}'}<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"error": "Must be logged in with appropriate permissions to save"}')<block_end>@mock.patch('graphite.dashboard.views.getPermissions')<def_stmt>test_dashboard_delete_no_permissions self gp<block_start>gp.return_value=[<none>]<line_sep>url=reverse('dashboard_delete' args=['testdashboard'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"error": "Must be logged in with appropriate permissions to delete"}')<block_end>@mock.patch('graphite.dashboard.views.getPermissions')<def_stmt>test_dashboard_save_template_no_permissions self gp<block_start>gp.return_value=[<none>]<line_sep>url=reverse('dashboard_save_template' args=['testtemplate' 'testkey'])<line_sep>request=copy.deepcopy(self.testtemplate)<line_sep>response=self.client.post(url request)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"error": "Must be logged in with appropriate permissions to save the template"}')<block_end>@mock.patch('graphite.dashboard.views.getPermissions')<def_stmt>test_dashboard_delete_template_no_permissions self gp<block_start>gp.return_value=[<none>]<line_sep>url=reverse('dashboard_delete_template' args=['testtemplate'])<line_sep>response=self.client.get(url)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content b'{"error": "Must be logged in with appropriate permissions to delete 
the template"}')<block_end><def_stmt>test_getPermissions_no_user self<block_start>settings.DASHBOARD_REQUIRE_AUTHENTICATION=<false><line_sep>settings.DASHBOARD_REQUIRE_PERMISSIONS=<false><line_sep>settings.DASHBOARD_REQUIRE_EDIT_GROUP=<false><import_from_stmt>graphite.dashboard.views getPermissions<line_sep>self.assertEqual(getPermissions(<false>) ['change' 'delete'])<block_end><def_stmt>test_getPermissions_no_user_require_auth self<block_start>settings.DASHBOARD_REQUIRE_AUTHENTICATION=<true><line_sep>settings.DASHBOARD_REQUIRE_PERMISSIONS=<false><line_sep>settings.DASHBOARD_REQUIRE_EDIT_GROUP=<false><import_from_stmt>graphite.dashboard.views getPermissions<line_sep>self.assertEqual(getPermissions(<false>) [])<block_end><def_stmt>test_getPermissions_valid_user self<block_start>settings.DASHBOARD_REQUIRE_AUTHENTICATION=<true><line_sep>settings.DASHBOARD_REQUIRE_PERMISSIONS=<false><line_sep>settings.DASHBOARD_REQUIRE_EDIT_GROUP=<false><import_from_stmt>graphite.dashboard.views getPermissions<line_sep>user=User.objects.create(email='<EMAIL>')<line_sep>user.backend=''<line_sep>self.assertEqual(getPermissions(user) ['change' 'delete'])<block_end><def_stmt>test_getPermissions_valid_user_require_perm self<block_start>settings.DASHBOARD_REQUIRE_AUTHENTICATION=<true><line_sep>settings.DASHBOARD_REQUIRE_PERMISSIONS=<true><line_sep>settings.DASHBOARD_REQUIRE_EDIT_GROUP=<false><import_from_stmt>graphite.dashboard.views getPermissions<line_sep>user=User.objects.create(email='<EMAIL>')<line_sep>user.backend=''<line_sep>self.assertEqual(getPermissions(user) [])<block_end><def_stmt>test_getPermissions_valid_user_edit_group self<block_start>settings.DASHBOARD_REQUIRE_AUTHENTICATION=<true><line_sep>settings.DASHBOARD_REQUIRE_PERMISSIONS=<false><line_sep>settings.DASHBOARD_REQUIRE_EDIT_GROUP=<true><import_from_stmt>graphite.dashboard.views getPermissions<line_sep>user=User.objects.create(email='<EMAIL>')<line_sep>user.backend=''<line_sep>self.assertEqual(getPermissions(user) [])<block_end><def_stmt>test_getPermissions_valid_user_require_perms_edit_group self<block_start>settings.DASHBOARD_REQUIRE_AUTHENTICATION=<true><line_sep>settings.DASHBOARD_REQUIRE_PERMISSIONS=<true><line_sep>settings.DASHBOARD_REQUIRE_EDIT_GROUP=<true><import_from_stmt>graphite.dashboard.views getPermissions<line_sep>user=User.objects.create(email='<EMAIL>')<line_sep>user.backend=''<line_sep>self.assertEqual(getPermissions(user) [])<block_end><block_end>
<import_stmt>boto3<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<class_stmt>Connection(object)<block_start><def_stmt>__init__ self type service=<none> region='us-west-2' profile='default'<block_start>self.region=region<line_sep>self.connection_type=type<line_sep>self.service=service<line_sep>self.client=<none><line_sep>self.resource=<none><line_sep>self.profile=profile<try_stmt><block_start>boto3.setup_default_session(profile_name=self.profile)<block_end><except_stmt>Exception<as>e<block_start>logger.info("Problem setting default boto3 session: {}".format(e))<block_end><block_end><def_stmt>connect self<block_start><if_stmt>self.connection_type<is><none><block_start><raise>AttributeError("Could not determine connection type. Set client, resource, or session.")<block_end><elif_stmt>self.connection_type<eq>"client"<block_start>client=boto3.client(self.service region_name=self.region)<line_sep>self.client=client<line_sep><return>self.client<block_end><elif_stmt>self.connection_type<eq>"resource"<block_start>resource=boto3.resource(self.service region_name=self.region)<line_sep>self.resource=resource<line_sep><return>self.resource<block_end><elif_stmt>self.connection_type<eq>"session"<block_start><try_stmt><block_start>session=boto3.Session(region_name=self.region profile_name=self.profile)<line_sep>logger.info("Returning session for default profile.")<block_end><except_stmt>Exception<as>e<block_start>logger.info("Profile unavailable; likely running on an AWS instance: {}".format(e))<line_sep>session=boto3.Session(region_name=self.region)<block_end><return>session<block_end><else_stmt><block_start><raise>AttributeError("Connection type is not supported.")<block_end><block_end><block_end>
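A short usage sketch for the `Connection` wrapper above, in plain Python. The service name is illustrative, and working AWS credentials (a `default` profile or an instance role) are assumed:

# A "client" connection returns a boto3 client for the named service.
ec2 = Connection("client", service="ec2", region="us-west-2").connect()

# A "session" connection ignores `service` and returns a boto3.Session,
# falling back to instance credentials if the profile is unavailable.
session = Connection("session").connect()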
<import_from_stmt>NENV *<import_stmt>asynchat<class_stmt>NodeBase(Node)<block_start><pass><block_end><class_stmt>Find_Prefix_At_End_Node(NodeBase)<block_start>""" """<line_sep>title='find_prefix_at_end'<line_sep>type_='asynchat'<line_sep>init_inputs=[NodeInputBP(label='haystack') NodeInputBP(label='needle') ]<line_sep>init_outputs=[NodeOutputBP(type_='data') ]<line_sep>color='#32DA22'<def_stmt>update_event self inp=-1<block_start>self.set_output_val(0 asynchat.find_prefix_at_end(self.input(0) self.input(1)))<block_end><block_end>export_nodes(Find_Prefix_At_End_Node )<line_sep>
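The node above wraps a single stdlib call. `asynchat.find_prefix_at_end(haystack, needle)` returns how many trailing characters of `haystack` form a prefix of `needle`, which asynchat uses to detect a terminator split across reads (note the module is deprecated and removed in Python 3.12):

import asynchat

assert asynchat.find_prefix_at_end("hello\r", "\r\n") == 1  # "\r" could begin "\r\n"
assert asynchat.find_prefix_at_end("hello", "\r\n") == 0    # no partial terminator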
<import_from_stmt>keras.models load_model<import_stmt>numpy<as>np<import_from_stmt>keras.optimizers Adam<import_from_stmt>keras.models Model<import_from_stmt>keras.layers Dense Conv2DTranspose Conv2D BatchNormalization Activation Concatenate Input MaxPool2D UpSampling2D ZeroPadding2D Lambda Add<import_from_stmt>keras.callbacks ModelCheckpoint<import_from_stmt>keras backend<as>K<import_stmt>keras<import_stmt>cv2<import_stmt>os<import_stmt>librosa<import_stmt>scipy<import_from_stmt>keras.utils plot_model<import_stmt>tensorflow<as>tf<import_from_stmt>keras.utils multi_gpu_model<import_from_stmt>discriminator contrastive_loss<class_stmt>ModelMGPU(Model)<block_start><def_stmt>__init__ self ser_model gpus<block_start>pmodel=multi_gpu_model(ser_model gpus)<line_sep>self.__dict__.update(pmodel.__dict__)<line_sep>self._smodel=ser_model<block_end><def_stmt>__getattribute__ self attrname<block_start>'''Override load and save methods to be used from the serial-model. The serial-model holds references to the weights in the multi-gpu model. '''<line_sep># return Model.__getattribute__(self, attrname) <if_stmt>'load'<in>attrname<or>'save'<in>attrname<block_start><return>getattr(self._smodel attrname)<block_end><return>super(ModelMGPU self).__getattribute__(attrname)<block_end><block_end><def_stmt>conv_block x num_filters kernel_size=3 strides=1 padding='same' act=<true><block_start>x=Conv2D(filters=num_filters kernel_size=kernel_size strides=strides padding=padding)(x)<line_sep>x=BatchNormalization(momentum=.8)(x)<line_sep><if_stmt>act<block_start>x=Activation('relu')(x)<block_end><return>x<block_end><def_stmt>conv_t_block x num_filters kernel_size=3 strides=2 padding='same'<block_start>x=Conv2DTranspose(filters=num_filters kernel_size=kernel_size strides=strides padding=padding)(x)<line_sep>x=BatchNormalization(momentum=.8)(x)<line_sep>x=Activation('relu')(x)<line_sep><return>x<block_end><def_stmt>create_model args############# encoder for face/identity <block_start>input_face=Input(shape=(args.img_size args.img_size 6) name="input_face")<line_sep>identity_mapping=conv_block(input_face 32 kernel_size=11)# 96x96 x1_face=conv_block(identity_mapping 64 kernel_size=7 strides=2)# 48x48 x2_face=conv_block(x1_face 128 5 2)# 24x24 x3_face=conv_block(x2_face 256 3 2)#12x12 x4_face=conv_block(x3_face 512 3 2)#6x6 x5_face=conv_block(x4_face 512 3 2)#3x3 x6_face=conv_block(x5_face 512 3 1 padding='valid')<line_sep>x7_face=conv_block(x6_face 256 1 1)<line_sep>############# encoder for audio input_audio=Input(shape=(12 35 1) name="input_audio")<line_sep>x=conv_block(input_audio 64)<line_sep>x=conv_block(x 128)<line_sep>x=ZeroPadding2D(((1 0) (0 0)))(x)<line_sep>x=conv_block(x 256 strides=(1 2))<line_sep>x=conv_block(x 256)<line_sep>x=conv_block(x 256 strides=2)<line_sep>x=conv_block(x 512 strides=2)<line_sep>x=conv_block(x 512 (4 5) 1 padding='valid')<line_sep>x=conv_block(x 256 1 1)<line_sep>embedding=Concatenate(axis=3)([x7_face x])<line_sep>############# decoder x=conv_block(embedding 512 1)<line_sep>x=conv_t_block(x 512 3 3)# 3x3 x=Concatenate(axis=3)([x5_face x])<line_sep>x=conv_t_block(x 512)#6x6 x=Concatenate(axis=3)([x4_face x])<line_sep>x=conv_t_block(x 256)#12x12 x=Concatenate(axis=3)([x3_face x])<line_sep>x=conv_t_block(x 128)#24x24 x=Concatenate(axis=3)([x2_face x])<line_sep>x=conv_t_block(x 64)#48x48 x=Concatenate(axis=3)([x1_face x])<line_sep>x=conv_t_block(x 32)#96x96 x=Concatenate(axis=3)([identity_mapping x])<line_sep>x=conv_block(x 16)#96x96 x=conv_block(x 16)#96x96 x=Conv2D(filters=3 
kernel_size=1 strides=1 padding="same")(x)<line_sep>prediction=Activation("sigmoid" name="prediction")(x)<line_sep>model=Model(inputs=[input_face input_audio] outputs=prediction)<line_sep>model.summary()<line_sep>ser_model=model<if_stmt>args.n_gpu<g>1<block_start>parallel_model=ModelMGPU(ser_model args.n_gpu)<block_end><else_stmt><block_start>parallel_model=ser_model<block_end>parallel_model.compile(loss='mae' optimizer=(Adam(lr=args.lr)<if>hasattr(args 'lr')<else>'adam'))<line_sep><return>parallel_model ser_model<block_end><def_stmt>create_model_residual args<block_start><def_stmt>residual_block inp num_filters<block_start>x=conv_block(inp num_filters)<line_sep>x=conv_block(x num_filters)<line_sep>x=Add()([x inp])<line_sep>x=Activation('relu')(x)<line_sep><return>x<block_end>############# encoder for face/identity input_face=Input(shape=(args.img_size args.img_size 6) name="input_face")<line_sep>identity_mapping=conv_block(input_face 32 kernel_size=7)# 96x96 x1_face=conv_block(identity_mapping 64 kernel_size=5 strides=2)# 48x48 x1_face=residual_block(x1_face 64)<line_sep>x1_face=residual_block(x1_face 64)<line_sep>x2_face=conv_block(x1_face 128 3 2)# 24x24 x2_face=residual_block(x2_face 128)<line_sep>x2_face=residual_block(x2_face 128)<line_sep>x2_face=residual_block(x2_face 128)<line_sep>x3_face=conv_block(x2_face 256 3 2)#12x12 x3_face=residual_block(x3_face 256)<line_sep>x3_face=residual_block(x3_face 256)<line_sep>x4_face=conv_block(x3_face 512 3 2)#6x6 x4_face=residual_block(x4_face 512)<line_sep>x4_face=residual_block(x4_face 512)<line_sep>x5_face=conv_block(x4_face 512 3 2)#3x3 x6_face=conv_block(x5_face 512 3 1 padding='valid')<line_sep>x7_face=conv_block(x6_face 512 1 1)<line_sep>############# encoder for audio input_audio=Input(shape=(12 35 1) name="input_audio")<line_sep>x=conv_block(input_audio 128)<line_sep>x=residual_block(x 128)<line_sep>x=residual_block(x 128)<line_sep>x=residual_block(x 128)<line_sep>x=ZeroPadding2D(((1 0) (0 0)))(x)<line_sep>x=conv_block(x 256 strides=(1 2))<line_sep>x=residual_block(x 256)<line_sep>x=residual_block(x 256)<line_sep>x=conv_block(x 512 strides=2)<line_sep>x=residual_block(x 512)<line_sep>x=residual_block(x 512)<line_sep>x=conv_block(x 512 strides=2)<line_sep>x=residual_block(x 512)<line_sep>x=conv_block(x 512 (4 5) 1 padding='valid')<line_sep>x=conv_block(x 512 1 1)<line_sep>embedding=Concatenate(axis=3)([x7_face x])<line_sep>############# decoder x=conv_t_block(embedding 512 3 3)# 3x3 x=Concatenate(axis=3)([x5_face x])<line_sep>x=conv_t_block(x 512)#6x6 x=residual_block(x 512)<line_sep>x=residual_block(x 512)<line_sep>x=Concatenate(axis=3)([x4_face x])<line_sep>x=conv_t_block(x 256)#12x12 x=residual_block(x 256)<line_sep>x=residual_block(x 256)<line_sep>x=Concatenate(axis=3)([x3_face x])<line_sep>x=conv_t_block(x 128)#24x24 x=residual_block(x 128)<line_sep>x=residual_block(x 128)<line_sep>x=Concatenate(axis=3)([x2_face x])<line_sep>x=conv_t_block(x 64)#48x48 x=residual_block(x 64)<line_sep>x=residual_block(x 64)<line_sep>x=Concatenate(axis=3)([x1_face x])<line_sep>x=conv_t_block(x 32)#96x96 x=Concatenate(axis=3)([identity_mapping x])<line_sep>x=conv_block(x 16)#96x96 x=conv_block(x 16)#96x96 x=Conv2D(filters=3 kernel_size=1 strides=1 padding="same")(x)<line_sep>prediction=Activation("sigmoid" name="prediction")(x)<line_sep>model=Model(inputs=[input_face input_audio] outputs=prediction)<line_sep>model.summary()<if_stmt>args.n_gpu<g>1<block_start>model=ModelMGPU(model args.n_gpu)<block_end>model.compile(loss='mae' 
optimizer=(Adam(lr=args.lr)<if>hasattr(args 'lr')<else>'adam'))<line_sep><return>model<block_end><def_stmt>create_combined_model generator discriminator args<block_start>input_face=Input(shape=(args.img_size args.img_size 6) name="input_face_comb")<line_sep>input_audio=Input(shape=(12 35 1) name="input_audio_comb")<line_sep>fake_face=generator([input_face input_audio])<line_sep>discriminator.trainable=<false><line_sep>d=discriminator([fake_face input_audio])<line_sep>model=Model([input_face input_audio] [fake_face d])<if_stmt>args.n_gpu<g>1<block_start>model=ModelMGPU(model args.n_gpu)<block_end>model.compile(loss=['mae' contrastive_loss] optimizer=(Adam(lr=args.lr)<if>hasattr(args 'lr')<else>'adam') loss_weights=[1. .01])<line_sep><return>model<block_end><if_stmt>__name__<eq>'__main__'<block_start>model=create_model_residual()<line_sep>#plot_model(model, to_file='model.png', show_shapes=True) <block_end>
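The builders above read only `img_size`, `n_gpu`, and optionally `lr` from `args`, so a plain namespace is enough to construct the networks; the values below are assumptions (96 matches the `# 96x96` comments), and the Keras/TF 1.x-era stack these imports target is assumed to be installed. As written, the `__main__` block calls `create_model_residual()` without the required `args` and would raise a `TypeError`.

from argparse import Namespace

args = Namespace(img_size=96, n_gpu=1, lr=1e-4)
parallel_model, serial_model = create_model(args)  # (multi-GPU wrapper, serial model)
residual_model = create_model_residual(args)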
# Lint as: python3 # Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for recommendation_model_launcher."""<import_stmt>os<import_from_stmt>absl flags<import_stmt>tensorflow<as>tf<import_from_stmt>model input_pipeline<import_from_stmt>model recommendation_model_launcher<as>launcher<import_from_stmt>google.protobuf text_format<line_sep>FLAGS=flags.FLAGS<line_sep>FAKE_MOVIE_GENRE_VOCAB=['UNK' 'Comedy' 'Drama' 'Romance' 'Animation' 'Children']<line_sep>TEST_INPUT_CONFIG=""" activity_feature_groups { features { feature_name: "context_movie_id" feature_type: INT vocab_size: 3952 embedding_dim: 8 feature_length: 5 } features { feature_name: "context_movie_rating" feature_type: FLOAT feature_length: 5 } encoder_type: CNN } activity_feature_groups { features { feature_name: "context_movie_genre" feature_type: STRING vocab_name: "movie_genre_vocab.txt" vocab_size: 19 embedding_dim: 8 feature_length: 8 } encoder_type: BOW } label_feature { feature_name: "label_movie_id" feature_type: INT vocab_size: 3952 embedding_dim: 8 feature_length: 1 } """<line_sep>EXAMPLE1=text_format.Parse(""" features { feature { key: "context_movie_id" value { int64_list { value: [1, 2, 0, 0, 0] } } } feature { key: "context_movie_rating" value { float_list { value: [3.5, 4.0, 0.0, 0.0, 0.0] } } } feature { key: "context_movie_genre" value { bytes_list { value: [ "Animation", "Children", "Comedy", "Comedy", "Romance", "UNK", "UNK", "UNK" ] } } } feature { key: "label_movie_id" value { int64_list { value: [3] } } } }""" tf.train.Example())<class_stmt>RecommendationModelLauncherTest(tf.test.TestCase)<block_start><def_stmt>_AssertSparseTensorValueEqual self a b<block_start>self.assertAllEqual(a.indices b.indices)<line_sep>self.assertAllEqual(a.values b.values)<line_sep>self.assertAllEqual(a.dense_shape b.dense_shape)<block_end><def_stmt>_assertInputDetail self input_details index name shape<block_start>self.assertEqual(name input_details[index]['name'])<line_sep>self.assertEqual(shape input_details[index]['shape'])<block_end><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.tmp_dir=self.create_tempdir()<line_sep>self.test_input_config_file=os.path.join(self.tmp_dir 'input_config.pbtxt')<line_sep>self.test_movie_genre_vocab_file=os.path.join(self.tmp_dir 'movie_genre_vocab.txt')<line_sep>self.test_input_data_file=os.path.join(self.tmp_dir 'test_input_data.tfrecord')<with_stmt>open(self.test_input_config_file 'w' encoding='utf-8')<as>f<block_start>f.write(TEST_INPUT_CONFIG)<block_end><with_stmt>open(self.test_movie_genre_vocab_file 'w' encoding='utf-8')<as>f<block_start><for_stmt>item FAKE_MOVIE_GENRE_VOCAB<block_start>f.write(item+'\n')<block_end><block_end><with_stmt>tf.io.TFRecordWriter(self.test_input_data_file)<as>file_writer<block_start>file_writer.write(EXAMPLE1.SerializeToString())<block_end>self.test_model_dir=os.path.join(self.tmp_dir 
'test_model_dir')<line_sep>FLAGS.training_data_filepattern=self.test_input_data_file<line_sep>FLAGS.testing_data_filepattern=self.test_input_data_file<line_sep>FLAGS.input_config_file=self.test_input_config_file<line_sep>FLAGS.model_dir=self.test_model_dir<line_sep>FLAGS.hidden_layer_dims=[8 4]<line_sep>FLAGS.eval_top_k=[1 5]<line_sep>FLAGS.num_predictions=5<line_sep>FLAGS.conv_num_filter_ratios=[2 4]<line_sep>FLAGS.conv_kernel_size=4<line_sep>FLAGS.lstm_num_units=16<block_end><def_stmt>testModelTrainEvalExport self<block_start>"""Verifies that model can be trained and evaluated."""<line_sep>tf.io.gfile.mkdir(FLAGS.model_dir)<line_sep>input_config=launcher.load_input_config()<line_sep>model_config=launcher.prepare_model_config()<line_sep>dataset=input_pipeline.get_input_dataset(data_filepattern=self.test_input_data_file input_config=input_config vocab_file_dir=self.tmp_dir batch_size=8)<line_sep>model=launcher.build_keras_model(input_config model_config)<line_sep>launcher.train_and_eval(model=model model_dir=FLAGS.model_dir train_input_dataset=dataset eval_input_dataset=dataset steps_per_epoch=2 epochs=2 eval_steps=1)<line_sep>self.assertTrue(os.path.exists(self.test_model_dir))<line_sep>summaries_dir=os.path.join(self.test_model_dir 'summaries')<line_sep>self.assertTrue(os.path.exists(summaries_dir))<line_sep>export_dir=os.path.join(FLAGS.model_dir 'export')<line_sep>latest_checkpoint=tf.train.latest_checkpoint(FLAGS.model_dir)<line_sep>launcher.save_model(checkpoint_path=latest_checkpoint export_dir=export_dir input_config=input_config model_config=model_config)<line_sep>savedmodel_path=os.path.join(export_dir 'saved_model.pb')<line_sep>self.assertTrue(os.path.exists(savedmodel_path))<line_sep>imported=tf.saved_model.load(export_dir tags=<none>)<line_sep>infer=imported.signatures['serving_default']<line_sep>context_movie_id=tf.range(5 dtype=tf.int32)<line_sep>context_movie_rating=tf.range(5 dtype=tf.float32)<line_sep>context_movie_genre=tf.range(8 dtype=tf.int32)<line_sep>predictions=infer(context_movie_id=context_movie_id context_movie_rating=context_movie_rating context_movie_genre=context_movie_genre)<line_sep>self.assertAllEqual([5] predictions['top_prediction_ids'].shape)<line_sep>self.assertAllEqual([5] predictions['top_prediction_scores'].shape)<line_sep>launcher.export_tflite(export_dir)<line_sep>tflite_model_path=os.path.join(export_dir 'model.tflite')<line_sep>self.assertTrue(os.path.exists(tflite_model_path))<line_sep>f=open(tflite_model_path 'rb')<line_sep>interpreter=tf.lite.Interpreter(model_content=f.read())<line_sep>interpreter.allocate_tensors()<line_sep>inference_signature=interpreter.get_signature_list()['serving_default']<line_sep>self.assertAllEqual(['context_movie_genre' 'context_movie_id' 'context_movie_rating'] inference_signature['inputs'])<line_sep>self.assertAllEqual(['top_prediction_ids' 'top_prediction_scores'] inference_signature['outputs'])<line_sep>serving_name_to_tensors={'serving_default_context_movie_id:0':context_movie_id 'serving_default_context_movie_rating:0':context_movie_rating 'serving_default_context_movie_genre:0':context_movie_genre}<line_sep>input_details=interpreter.get_input_details()<line_sep>output_details=interpreter.get_output_details()<line_sep>index_to_tensors={}<for_stmt>input_detail input_details<block_start>index_to_tensors[input_detail['index']]=serving_name_to_tensors[input_detail['name']]<block_end><for_stmt>index,tensor index_to_tensors.items()<block_start>interpreter.set_tensor(index 
tensor)<block_end>interpreter.invoke()<line_sep>tflite_top_predictions_ids=interpreter.get_tensor(output_details[0]['index'])<line_sep>tflite_top_prediction_scores=interpreter.get_tensor(output_details[1]['index'])<line_sep>self.assertAllEqual([5] tflite_top_predictions_ids.shape)<line_sep>self.assertAllEqual([5] tflite_top_prediction_scores.shape)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>launcher.define_flags()<line_sep>tf.test.main()<block_end>
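The test above drives the TFLite interpreter through raw tensor indices. On recent TensorFlow releases the exported signature can also be invoked by input name, which avoids the name-to-index plumbing; a hedged alternative reusing `interpreter` and the context tensors from the test:

runner = interpreter.get_signature_runner('serving_default')
outputs = runner(context_movie_id=context_movie_id,
                 context_movie_rating=context_movie_rating,
                 context_movie_genre=context_movie_genre)
top_ids, top_scores = outputs['top_prediction_ids'], outputs['top_prediction_scores']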
<import_from_future_stmt> absolute_import<import_stmt>unittest<import_from_stmt>src.solver Solver<class_stmt>TestSolver(unittest.TestCase)<block_start><def_stmt>test_ctor self<block_start>solver=Solver("" "" <false>)<line_sep>self.assertEqual(solver.name "")<line_sep>self.assertEqual(solver.quiet_mode <false>)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
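`src.solver` itself is not part of this record. A minimal stub consistent with the two assertions above might look like the sketch below; the middle constructor argument is never asserted on, so its name and meaning here are guesses:

class Solver:
    def __init__(self, name, path, quiet_mode):
        self.name = name
        self.path = path  # hypothetical; the test only checks name and quiet_mode
        self.quiet_mode = quiet_mode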
<import_from_stmt>app.lib.base.provider Provider<import_from_stmt>app.lib.api.base ApiBase<import_from_stmt>app.lib.api.definitions.record Record<class_stmt>ApiRecords(ApiBase)<block_start><def_stmt>all self user_id zone_id=<none> domain=<none><block_start>provider=Provider()<line_sep>zones=provider.dns_zones()<line_sep>records=provider.dns_records()<line_sep>zone=zones.get(zone_id user_id)<if>zone_id<is><not><none><else>zones.find(domain user_id=user_id)<if_stmt><not>zone<block_start><return>self.send_not_found_response()<block_end>results=records.get_zone_records(zone.id)<line_sep>data=[]<for_stmt>result results<block_start>data.append(self.__load_record(result))<block_end><return>self.send_valid_response(data)<block_end><def_stmt>one self user_id record_id zone_id=<none> domain=<none><block_start>provider=Provider()<line_sep>zones=provider.dns_zones()<line_sep>records=provider.dns_records()<line_sep>zone=zones.get(zone_id user_id)<if>zone_id<is><not><none><else>zones.find(domain user_id=user_id)<if_stmt><not>zone<block_start><return>self.send_not_found_response()<block_end>record=records.get(record_id dns_zone_id=zone.id)<if_stmt><not>record<block_start><return>self.send_not_found_response()<block_end><return>self.send_valid_response(self.__load_record(record))<block_end><def_stmt>__load_record self item<block_start>record=Record()<line_sep>record.id=item.id<line_sep>record.zone_id=item.dns_zone_id<line_sep>record.active=item.active<line_sep>record.cls=item.cls<line_sep>record.type=item.type<line_sep>record.ttl=int(item.ttl)<line_sep>record.data=item.data<line_sep>record.is_conditional=item.has_conditional_responses<line_sep>record.conditional_count=item.conditional_count<line_sep>record.conditional_limit=item.conditional_limit<line_sep>record.conditional_reset=item.conditional_reset<line_sep>record.conditional_data=item.conditional_data<line_sep><return>record<block_end><def_stmt>classes self<block_start>records=Provider().dns_records()<line_sep><return>self.send_valid_response(records.get_classes())<block_end><def_stmt>types self<block_start>records=Provider().dns_records()<line_sep><return>self.send_valid_response(records.get_types())<block_end><def_stmt>delete self user_id record_id zone_id=<none> domain=<none><block_start>provider=Provider()<line_sep>zones=provider.dns_zones()<line_sep>records=provider.dns_records()<line_sep>zone=zones.get(zone_id user_id)<if>zone_id<is><not><none><else>zones.find(domain user_id=user_id)<if_stmt><not>zone<block_start><return>self.send_not_found_response()<block_end>record=records.get(record_id dns_zone_id=zone.id)<if_stmt><not>record<block_start><return>self.send_not_found_response()<block_end>records.delete(record)<line_sep><return>self.send_success_response()<block_end><def_stmt>create self user_id zone_id=<none> domain=<none><block_start>provider=Provider()<line_sep>zones=provider.dns_zones()<line_sep>records=provider.dns_records()<line_sep>zone=zones.get(zone_id user_id)<if>zone_id<is><not><none><else>zones.find(domain user_id=user_id)<if_stmt><not>zone<block_start><return>self.send_not_found_response()<block_end># First get the mandatory fields for all record types. required_fields=['class' 'type' 'ttl' 'active' 'data' 'is_conditional' 'conditional_count' 'conditional_limit' 'conditional_reset' 'conditional_data']<line_sep>data=self.get_json(required_fields)<if_stmt>data<is><false><block_start><return>self.send_error_response(5000 'Missing fields' 'Required fields are: {0}'.format(', '.join(required_fields)))<block_end># Validate. 
<if_stmt>data['class']<not><in>records.get_classes()<block_start><return>self.send_error_response(5005 'Invalid class' '')<block_end><elif_stmt>data['type']<not><in>records.get_types()<block_start><return>self.send_error_response(5005 'Invalid type' '')<block_end><if_stmt>isinstance(data['ttl'] str)<and>data['ttl'].isdigit()<is><false><block_start><return>self.send_error_response(5005 'Invalid TTL' '')<block_end>data['ttl']=int(data['ttl'])<if_stmt>data['ttl']<l>0<block_start><return>self.send_error_response(5005 'Invalid TTL' '')<block_end><elif_stmt>data['conditional_count']<l>0<block_start><return>self.send_error_response(5005 'Invalid Conditional Count' '')<block_end><elif_stmt>data['conditional_limit']<l>0<block_start><return>self.send_error_response(5005 'Invalid Conditional Limit' '')<block_end># Fix types. data['active']=<true><if>data['active']<else><false><line_sep>data['is_conditional']=<true><if>data['is_conditional']<else><false><line_sep># Now that we have the type, we can get the type-specific properties. record_type_properties=records.get_record_type_properties(data['type'] clean=<true>)<line_sep>record_type_conditional_properties=records.get_record_type_properties(data['type'] clean=<true>)<line_sep>all_errors=[]<line_sep>basic_data,errors=self.__parse_data_properties(data['data'] record_type_properties)<line_sep>all_errors<augadd>errors<if_stmt>data['is_conditional']<block_start>conditional_data,errors=self.__parse_data_properties(data['conditional_data'] record_type_conditional_properties)<line_sep>all_errors<augadd>errors<block_end><else_stmt><block_start>conditional_data={}<block_end><if_stmt>len(all_errors)<g>0<block_start><return>self.send_error_response(5005 'Invalid type property fields' all_errors)<block_end># Create the record. record=records.create()<line_sep>record=records.save(record zone.id data['ttl'] data['class'] data['type'] basic_data data['active'])<line_sep>record=records.save_conditions(record enabled=data['is_conditional'] data=conditional_data count=data['conditional_count'] limit=data['conditional_limit'] reset=data['conditional_reset'])<line_sep><return>self.one(user_id record.id zone_id=zone.id)<block_end><def_stmt>__parse_data_properties self data properties<block_start>errors=[]<line_sep>output={}<for_stmt>property,type properties.items()<block_start><if_stmt>property<not><in>data<block_start>errors.append('Missing type property {0}'.format(property))<line_sep><continue><block_end>value=data[property]<if_stmt>(type<eq>'int')<and>(isinstance(value str))<block_start><if_stmt><not>value.isdigit()<block_start>errors.append('Invalid {0} value'.format(property))<line_sep><continue><block_end>value=int(value)<block_end><if_stmt>(type<eq>'str')<and>(len(value)<eq>0)<block_start>errors.append('Invalid {0} value'.format(property))<block_end><elif_stmt>(type<eq>'int')<and>(value<l>0)<block_start>errors.append('Invalid {0} value'.format(property))<block_end>output[property]=value<block_end><return>output errors<block_end><def_stmt>update self user_id record_id zone_id=<none> domain=<none><block_start>provider=Provider()<line_sep>zones=provider.dns_zones()<line_sep>records=provider.dns_records()<line_sep>zone=zones.get(zone_id user_id)<if>zone_id<is><not><none><else>zones.find(domain user_id=user_id)<if_stmt><not>zone<block_start><return>self.send_not_found_response()<block_end># Get record. 
record=records.get(record_id dns_zone_id=zone.id)<if_stmt><not>record<block_start><return>self.send_not_found_response()<block_end>data=self.get_json([])<if_stmt>'class'<in>data<block_start><if_stmt>data['class']<not><in>records.get_classes()<block_start><return>self.send_error_response(5005 'Invalid class' '')<block_end><block_end><else_stmt><block_start>data['class']=record.cls<block_end><if_stmt>'type'<in>data<block_start><if_stmt>data['type']<not><in>records.get_types()<block_start><return>self.send_error_response(5005 'Invalid type' '')<block_end><block_end><else_stmt><block_start>data['type']=record.type<block_end><if_stmt>'ttl'<in>data<block_start><if_stmt>isinstance(data['ttl'] str)<block_start><if_stmt><not>data['ttl'].isdigit()<block_start><return>self.send_error_response(5005 'Invalid TTL' '')<block_end>data['ttl']=int(data['ttl'])<block_end><if_stmt>data['ttl']<l>0<block_start><return>self.send_error_response(5005 'Invalid TTL' '')<block_end><block_end><else_stmt><block_start>data['ttl']=record.ttl<block_end><if_stmt>'active'<in>data<block_start>data['active']=<true><if>data['active']<else><false><block_end><else_stmt><block_start>data['active']=record.active<block_end><if_stmt>'is_conditional'<in>data<block_start>data['is_conditional']=<true><if>data['is_conditional']<else><false><block_end><else_stmt><block_start>data['is_conditional']=record.has_conditional_responses<block_end>data['conditional_limit']=data['conditional_limit']<if>'conditional_limit'<in>data<else>record.conditional_limit<line_sep>data['conditional_count']=data['conditional_count']<if>'conditional_count'<in>data<else>record.conditional_count<line_sep>data['conditional_reset']=data['conditional_reset']<if>'conditional_reset'<in>data<else>record.conditional_reset<if_stmt>'data'<in>data<block_start>record_type_properties=records.get_record_type_properties(data['type'] clean=<true>)<line_sep>data['data'],errors=self.__parse_data_properties(data['data'] record_type_properties)<if_stmt>len(errors)<g>0<block_start><return>self.send_error_response(5005 'Invalid type property fields' errors)<block_end><block_end><else_stmt><block_start>data['data']=record.data<block_end><if_stmt>('conditional_data'<in>data)<and>(data['is_conditional']<is><true>)<block_start>record_type_properties=records.get_record_type_properties(data['type'] clean=<true>)<line_sep>data['conditional_data'],errors=self.__parse_data_properties(data['conditional_data'] record_type_properties)<if_stmt>len(errors)<g>0<block_start><return>self.send_error_response(5005 'Invalid type property fields' errors)<block_end><block_end><else_stmt><block_start>data['conditional_data']=record.conditional_data<block_end>record=records.save(record zone.id data['ttl'] data['class'] data['type'] data['data'] data['active'])<line_sep>record=records.save_conditions(record enabled=data['is_conditional'] data=data['conditional_data'] count=data['conditional_count'] limit=data['conditional_limit'] reset=data['conditional_reset'])<line_sep><return>self.one(user_id record.id zone_id=zone.id)<block_end><block_end>
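A sketch of the JSON body the `create` endpoint above expects, built from its `required_fields` list. All values are illustrative, and the keys inside `data` depend on the record type's property map, so the A-record `address` field is an assumption:

import json

payload = {
    "class": "IN", "type": "A", "ttl": 300, "active": True,
    "data": {"address": "127.0.0.1"},  # type-specific properties (assumed)
    "is_conditional": False, "conditional_count": 0, "conditional_limit": 0,
    "conditional_reset": 0, "conditional_data": {},
}
body = json.dumps(payload)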
<import_stmt>pytest<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_ run_module_suite<import_from_stmt>qutip smesolve mesolve photocurrent_mesolve liouvillian QobjEvo spre spost destroy coherent parallel_map qeye fock_dm general_stochastic ket2dm num <def_stmt>f t args<block_start><return>args["a"]<times>t<block_end>@pytest.mark.slow<def_stmt>test_smesolve_homodyne_methods <block_start>"Stochastic: smesolve: homodyne methods with single jump operator"<def_stmt>arccoth x<block_start><return>0.5<times>np.log((1.+x)/(x-1.))<block_end>th=0.1# Interaction parameter alpha=np.cos(th)<line_sep>beta=np.sin(th)<line_sep>gamma=1.<line_sep>N=30# number of Fock states Id=qeye(N)<line_sep>a=destroy(N)<line_sep>s=0.5<times>((alpha+beta)<times>a+(alpha-beta)<times>a.dag())<line_sep>x=(a+a.dag())<times>2<power>-0.5<line_sep>H=Id<line_sep>c_op=[gamma<power>0.5<times>a]<line_sep>sc_op=[s]<line_sep>e_op=[x x<times>x]<line_sep>rho0=fock_dm(N 0)# initial vacuum state T=3.# final time # number of time steps for which we save the expectation values N_store=121<line_sep>Nsub=10<line_sep>tlist=np.linspace(0 T N_store)<line_sep>ddt=(tlist[1]-tlist[0])<line_sep>#### Analytic solution y0=0.5<line_sep>A=(gamma<power>2+alpha<power>2<times>(beta<power>2+4<times>gamma)-2<times>alpha<times>beta<times>gamma)<power>0.5<line_sep>B=arccoth((-4<times>alpha<power>2<times>y0+alpha<times>beta-gamma)/A)<line_sep>y_an=(alpha<times>beta-gamma+A/np.tanh(0.5<times>A<times>tlist-B))/(4<times>alpha<power>2)<line_sep>list_methods_tol=[['euler-maruyama' 2e-2] ['pc-euler' 2e-3] ['pc-euler-2' 2e-3] ['platen' 1e-3] ['milstein' 1e-3] ['milstein-imp' 1e-3] ['rouchon' 1e-3] ['taylor1.5' 1e-4] ['taylor1.5-imp' 1e-4] ['explicit1.5' 1e-4] ['taylor2.0' 1e-4]]<for_stmt>n_method list_methods_tol<block_start>sol=smesolve(H rho0 tlist c_op sc_op e_op nsubsteps=Nsub method='homodyne' solver=n_method[0])<line_sep>sol2=smesolve(H rho0 tlist c_op sc_op e_op store_measurement=0 nsubsteps=Nsub method='homodyne' solver=n_method[0] noise=sol.noise)<line_sep>sol3=smesolve(H rho0 tlist c_op sc_op e_op nsubsteps=Nsub<times>5 method='homodyne' solver=n_method[0] tol=1e-8)<line_sep>err=1/T<times>np.sum(np.abs(y_an-(sol.expect[1]-sol.expect[0]<times>sol.expect[0].conj())))<times>ddt<line_sep>err3=1/T<times>np.sum(np.abs(y_an-(sol3.expect[1]-sol3.expect[0]<times>sol3.expect[0].conj())))<times>ddt<line_sep>print(n_method[0] ': deviation =' err ', tol =' n_method[1])<line_sep>assert_(err<l>n_method[1])<line_sep># 5* more substep should decrease the error assert_(err3<l>err)<line_sep># just to check that noise is not affected by smesolve assert_(np.all(sol.noise<eq>sol2.noise))<line_sep>assert_(np.all(sol.expect[0]<eq>sol2.expect[0]))<block_end>sol=smesolve(H rho0 tlist[:2] c_op sc_op e_op noise=10 ntraj=2 nsubsteps=Nsub method='homodyne' solver='euler' store_measurement=1)<line_sep>sol2=smesolve(H rho0 tlist[:2] c_op sc_op e_op noise=10 ntraj=2 nsubsteps=Nsub method='homodyne' solver='euler' store_measurement=0)<line_sep>sol3=smesolve(H rho0 tlist[:2] c_op sc_op e_op noise=11 ntraj=2 nsubsteps=Nsub method='homodyne' solver='euler')<line_sep># sol and sol2 have the same seed, sol3 differ. 
assert_(np.all(sol.noise<eq>sol2.noise))<line_sep>assert_(np.all(sol.noise<ne>sol3.noise))<line_sep>assert_(<not>np.all(sol.measurement[0]<eq>0.+0j))<line_sep>assert_(np.all(sol2.measurement[0]<eq>0.+0j))<line_sep>sol=smesolve(H rho0 tlist[:2] c_op sc_op e_op noise=np.array([1 2]) ntraj=2 nsubsteps=Nsub method='homodyne' solver='euler')<line_sep>sol2=smesolve(H rho0 tlist[:2] c_op sc_op e_op noise=np.array([2 1]) ntraj=2 nsubsteps=Nsub method='homodyne' solver='euler')<line_sep># sol and sol2 have the seed of traj 1 and 2 reversed. assert_(np.all(sol.noise[0 : : :]<eq>sol2.noise[1 : : :]))<line_sep>assert_(np.all(sol.noise[1 : : :]<eq>sol2.noise[0 : : :]))<block_end><def_stmt>test_smesolve_photocurrent <block_start>"Stochastic: photocurrent_mesolve"<line_sep>tol=0.01<line_sep>N=4<line_sep>gamma=0.25<line_sep>ntraj=20<line_sep>nsubsteps=100<line_sep>a=destroy(N)<line_sep>H=[[a.dag()<times>a f]]<line_sep>psi0=coherent(N 0.5)<line_sep>sc_ops=[np.sqrt(gamma)<times>a np.sqrt(gamma)<times>a<times>0.5]<line_sep>e_ops=[a.dag()<times>a a+a.dag() (-1j)<times>(a-a.dag())]<line_sep>times=np.linspace(0 1.0 21)<line_sep>res_ref=mesolve(H psi0 times sc_ops e_ops args={"a":2})<line_sep>res=photocurrent_mesolve(H psi0 times [] sc_ops e_ops args={"a":2} ntraj=ntraj nsubsteps=nsubsteps store_measurement=<true> map_func=parallel_map)<line_sep>assert_(all([np.mean(abs(res.expect[idx]-res_ref.expect[idx]))<l>tol<for>idx range(len(e_ops))]))<line_sep>assert_(len(res.measurement)<eq>ntraj)<line_sep>assert_(all([m.shape<eq>(len(times) len(sc_ops))<for>m res.measurement]))<block_end><def_stmt>test_smesolve_homodyne <block_start>"Stochastic: smesolve: homodyne, time-dependent H"<line_sep>tol=0.01<line_sep>N=4<line_sep>gamma=0.25<line_sep>ntraj=20<line_sep>nsubsteps=100<line_sep>a=destroy(N)<line_sep>H=[[a.dag()<times>a f]]<line_sep>psi0=coherent(N 0.5)<line_sep>sc_ops=[np.sqrt(gamma)<times>a np.sqrt(gamma)<times>a<times>0.5]<line_sep>e_ops=[a.dag()<times>a a+a.dag() (-1j)<times>(a-a.dag())]<line_sep>times=np.linspace(0 1.0 21)<line_sep>res_ref=mesolve(H psi0 times sc_ops e_ops args={"a":2})<line_sep>list_methods_tol=['euler-maruyama' 'pc-euler' 'pc-euler-2' 'platen' 'milstein' 'milstein-imp' 'rouchon' 'taylor15' 'taylor15-imp' 'explicit15']<for_stmt>solver list_methods_tol<block_start>res=smesolve(H psi0 times [] sc_ops e_ops ntraj=ntraj nsubsteps=nsubsteps args={"a":2} method='homodyne' store_measurement=<true> solver=solver map_func=parallel_map)<line_sep>assert_(all([np.mean(abs(res.expect[idx]-res_ref.expect[idx]))<l>tol<for>idx range(len(e_ops))]))<line_sep>assert_(len(res.measurement)<eq>ntraj)<line_sep>assert_(all([m.shape<eq>(len(times) len(sc_ops))<for>m res.measurement]))<block_end><block_end>@pytest.mark.slow<def_stmt>test_smesolve_heterodyne <block_start>"Stochastic: smesolve: heterodyne, time-dependent H"<line_sep>tol=0.01<line_sep>N=4<line_sep>gamma=0.25<line_sep>ntraj=20<line_sep>nsubsteps=100<line_sep>a=destroy(N)<line_sep>H=[[a.dag()<times>a f]]<line_sep>psi0=coherent(N 0.5)<line_sep>sc_ops=[np.sqrt(gamma)<times>a np.sqrt(gamma)<times>a<times>0.5]<line_sep>e_ops=[a.dag()<times>a a+a.dag() (-1j)<times>(a-a.dag())]<line_sep>times=np.linspace(0 1.0 21)<line_sep>res_ref=mesolve(H psi0 times sc_ops e_ops args={"a":2})<line_sep>list_methods_tol=['euler-maruyama' 'pc-euler' 'pc-euler-2' 'platen' 'milstein' 'milstein-imp' 'rouchon' 'taylor15' 'taylor15-imp' 'explicit15']<for_stmt>solver list_methods_tol<block_start>res=smesolve(H psi0 times [] sc_ops e_ops ntraj=ntraj nsubsteps=nsubsteps args={"a":2} 
method='heterodyne' store_measurement=<true> solver=solver map_func=parallel_map)<line_sep>assert_(all([np.mean(abs(res.expect[idx]-res_ref.expect[idx]))<l>tol<for>idx range(len(e_ops))]))<line_sep>assert_(len(res.measurement)<eq>ntraj)<line_sep>assert_(all([m.shape<eq>(len(times) len(sc_ops) 2)<for>m res.measurement]))<block_end><block_end>@pytest.mark.slow<def_stmt>test_general_stochastic <block_start>"Stochastic: general_stochastic"<line_sep>"Reproduce smesolve homodyne"<line_sep>tol=0.025<line_sep>N=4<line_sep>gamma=0.25<line_sep>ntraj=20<line_sep>nsubsteps=50<line_sep>a=destroy(N)<line_sep>H=[[a.dag()<times>a f]]<line_sep>psi0=coherent(N 0.5)<line_sep>sc_ops=[np.sqrt(gamma)<times>a np.sqrt(gamma)<times>a<times>0.5]<line_sep>e_ops=[a.dag()<times>a a+a.dag() (-1j)<times>(a-a.dag())]<line_sep>L=liouvillian(QobjEvo([[a.dag()<times>a f]] args={"a":2}) c_ops=sc_ops)<line_sep>L.compile()<line_sep>sc_opsM=[QobjEvo(spre(op)+spost(op.dag()))<for>op sc_ops]<line_sep>[op.compile()<for>op sc_opsM]<line_sep>e_opsM=[spre(op)<for>op e_ops]<def_stmt>d1 t vec<block_start><return>L.mul_vec(t vec)<block_end><def_stmt>d2 t vec<block_start>out=[]<for_stmt>op sc_opsM<block_start>out.append(op.mul_vec(t vec)-op.expect(t vec)<times>vec)<block_end><return>np.stack(out)<block_end>times=np.linspace(0 0.5 13)<line_sep>res_ref=mesolve(H psi0 times sc_ops e_ops args={"a":2})<line_sep>list_methods_tol=['euler-maruyama' 'platen' 'explicit15']<for_stmt>solver list_methods_tol<block_start>res=general_stochastic(ket2dm(psi0) times d1 d2 len_d2=2 e_ops=e_opsM normalize=<false> ntraj=ntraj nsubsteps=nsubsteps solver=solver)<line_sep>assert_(all([np.mean(abs(res.expect[idx]-res_ref.expect[idx]))<l>tol<for>idx range(len(e_ops))]))<line_sep>assert_(len(res.measurement)<eq>ntraj)<block_end><block_end><def_stmt>f_dargs a args<block_start><return>args["expect_op_3"]-1<block_end><def_stmt>test_ssesolve_feedback <block_start>"Stochastic: ssesolve: time-dependent H with feedback"<line_sep>tol=0.01<line_sep>N=4<line_sep>ntraj=10<line_sep>nsubsteps=100<line_sep>a=destroy(N)<line_sep>H=[num(N)]<line_sep>psi0=coherent(N 2.5)<line_sep>sc_ops=[[a+a.dag() f_dargs]]<line_sep>e_ops=[a.dag()<times>a a+a.dag() (-1j)<times>(a-a.dag()) qeye(N)]<line_sep>times=np.linspace(0 10 101)<line_sep>res_ref=mesolve(H psi0 times sc_ops e_ops args={"expect_op_3":qeye(N)})<line_sep>res=smesolve(H psi0 times sc_ops=sc_ops e_ops=e_ops noise=1 ntraj=ntraj nsubsteps=nsubsteps method='homodyne' map_func=parallel_map args={"expect_op_3":qeye(N)})<line_sep>assert_(all([np.mean(abs(res.expect[idx]-res_ref.expect[idx]))<l>tol<for>idx range(len(e_ops))]))<block_end><if_stmt>__name__<eq>"__main__"<block_start>run_module_suite()<block_end>
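A minimal homodyne `smesolve` call in the same shape as the tests above, sized to run quickly (qutip 4.x API assumed):

import numpy as np
from qutip import smesolve, destroy, fock_dm

N = 4
a = destroy(N)
sol = smesolve(a.dag() * a, fock_dm(N, 0), np.linspace(0, 1, 11),
               c_ops=[], sc_ops=[np.sqrt(0.25) * a], e_ops=[a.dag() * a],
               ntraj=2, nsubsteps=50, method='homodyne', solver='euler')
print(sol.expect[0])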
<import_from_stmt>typing Dict List Union<import_from_stmt>typeguard check_argument_types<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_from_stmt>neuralmonkey.decoders.autoregressive AutoregressiveDecoder<import_from_stmt>neuralmonkey.decoders.sequence_labeler SequenceLabeler<import_from_stmt>neuralmonkey.decorators tensor<import_from_stmt>neuralmonkey.runners.base_runner BaseRunner<line_sep>SupportedDecoders=Union[AutoregressiveDecoder SequenceLabeler]<class_stmt>XentRunner(BaseRunner[SupportedDecoders])# pylint: disable=too-few-public-methods # Pylint issue here: https://github.com/PyCQA/pylint/issues/2607 <block_start><class_stmt>Executable(BaseRunner.Executable["XentRunner"])<block_start><def_stmt>collect_results self results:List[Dict]<arrow><none><block_start>xents=np.mean([res["xents"]<for>res results] axis=0)<line_sep>self.set_runner_result(outputs=xents.tolist() losses=[float(np.mean(xents))])<block_end><block_end># pylint: enable=too-few-public-methods <def_stmt>__init__ self output_series:str decoder:SupportedDecoders<arrow><none><block_start>check_argument_types()<line_sep>super().__init__(output_series decoder)<block_end>@tensor<def_stmt>fetches self<arrow>Dict[str tf.Tensor]<block_start><return>{"xents":self.decoder.train_xents}<block_end>@property<def_stmt>loss_names self<arrow>List[str]<block_start><return>["xent"]<block_end><block_end>
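The aggregation in `Executable.collect_results` above is simple enough to show standalone: per-sentence cross-entropy vectors from each session are averaged element-wise, and the scalar `xent` loss is their overall mean. A small worked example with made-up values:

import numpy as np

results = [{"xents": np.array([1.0, 2.0])},  # e.g. two ensembled sessions
           {"xents": np.array([3.0, 4.0])}]
xents = np.mean([res["xents"] for res in results], axis=0)  # -> [2.0, 3.0]
outputs, losses = xents.tolist(), [float(np.mean(xents))]   # -> [2.0, 3.0], [2.5]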