from bs4 import UnicodeDammit


def file_should_not_contain_phrases(filename, offset=0, *phrases):
    """don't fail _too_ hard if the file can't be read for some reason"""
    with open(filename, "rb") as fp:
        raw = fp.read()[offset:]

    text = None

    try:
        text = raw.decode("utf-8")
    except Exception as err:
        print("Failed to read", filename, "forcing unicode...\n", err)
        try:
            text = UnicodeDammit.detwingle(raw).decode("utf-8")
        except Exception as err:
            print("Failed to read", filename, "giving up...\n", err)
            text = None

    matches = {}

    if text is not None:
        for phrase in phrases:
            if phrase in text:
                matches[phrase] = True

        assert not matches, "Phrases found in {}: {}".format(filename, matches)
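# A minimal usage sketch for the helper above. The temp file and the phrases
# checked are illustrative assumptions, not part of the original source.
import tempfile

with tempfile.NamedTemporaryFile("wb", suffix=".txt", delete=False) as tmp:
    tmp.write(b"all good here")

# Passes silently: neither phrase occurs in the decoded file contents.
file_should_not_contain_phrases(tmp.name, 0, "Traceback", "ERROR")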
default_app_config = 'demo.periods.apps.PeriodsConfig'
""" Given a binary tree, check whether it is a mirror of itself (ie, symmetric around its center). For example, this binary tree is symmetric: 1 / \ 2 2 / \ / \ 3 4 4 3 But the following is not: 1 / \ 2 2 \ \ 3 3 """<line_sep># Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None <class_stmt>Solution(object)<block_start><def_stmt>isSymmetric self root<block_start>""" :type root: TreeNode :rtype: bool """<if_stmt>root<is><none><block_start><return><true><block_end><if_stmt>root.left<is><none><and>root.right<is><none><block_start><return><true><block_end><if_stmt>root.left<is><not><none><and>root.right<is><not><none><block_start><return>self._isSymmetric(root.left root.right)<block_end><return><false><block_end><def_stmt>_isSymmetric self left right<block_start><if_stmt>left<is><none><and>right<is><none><block_start><return><true><block_end><if_stmt>left<is><not><none><and>right<is><not><none><block_start><return>(left.val<eq>right.val<and>self._isSymmetric(left.left right.right)<and>self._isSymmetric(left.right right.left))<block_end><return><false><block_end><block_end>
from django.contrib.auth.backends import ModelBackend


class TestClientBackend(ModelBackend):
    pass


class BackendWithoutGetUserMethod:
    pass
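# Stub backends like these are typically wired into a Django test settings
# module via the standard AUTHENTICATION_BACKENDS setting; the dotted path
# below is hypothetical, depending on where this module actually lives:
#
# AUTHENTICATION_BACKENDS = ["tests.backends.TestClientBackend"]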
import FWCore.ParameterSet.Config as cms

process = cms.Process("DQM")

#
# DQM SERVICES
#
process.load("DQMServices.Core.DQM_cfg")
process.load("FWCore.MessageService.MessageLogger_cfi")

#
# DQM SOURCES
#
process.load("CondCore.DBCommon.CondDBSetup_cfi")
#process.load("Configuration.GlobalRuns.ForceZeroTeslaField_cff")
#process.load("Configuration.GlobalRuns.ReconstructionGR_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
#process.load("L1Trigger.Configuration.L1Config_cff")
process.load("L1TriggerConfig.L1ScalesProducers.L1MuTriggerScalesConfig_cff")
process.load("L1TriggerConfig.L1ScalesProducers.L1MuTriggerPtScaleConfig_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtBoardMapsConfig_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1030.L1Menu2008_2E30_Unprescaled_cff")
#process.load("L1Trigger.HardwareValidation.L1HardwareValidation_cff")
process.load("DQMServices.Components.DQMEnvironment_cfi")

# The GenMET is not in the edm root files. You have to produce it by yourself
process.load("RecoMET.Configuration.GenMETParticles_cff")
process.load("RecoMET.METProducers.genMetTrue_cfi")

process.load("HLTriggerOffline.SUSYBSM.SUSYBSM_triggerValidation_cff")
process.load("Geometry.CaloEventSetup.CaloTopology_cfi")

process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(10))

process.source = cms.Source(
    "PoolSource",
    fileNames=cms.untracked.vstring(
        #"file:/build/nuno/test31/CMSSW_3_1_0_pre5/src/TTbar_Tauola_cfi_py_GEN_FASTSIM_VALIDATION.root"
        '/store/relval/CMSSW_3_1_0_pre5/RelValQCD_Pt_80_120/GEN-SIM-RECO/IDEAL_31X_v1/0000/E63C1A00-0C2C-DE11-BFC1-000423D98800.root'
    )
)

process.MessageLogger = cms.Service(
    "MessageLogger",
    detailedInfo=cms.untracked.PSet(threshold=cms.untracked.string('INFO')),
    critical=cms.untracked.PSet(threshold=cms.untracked.string('ERROR')),
    debugModules=cms.untracked.vstring('*'),
    cout=cms.untracked.PSet(
        threshold=cms.untracked.string('WARNING'),
        WARNING=cms.untracked.PSet(limit=cms.untracked.int32(0)),
        noLineBreaks=cms.untracked.bool(True)
    ),
    destinations=cms.untracked.vstring('detailedInfo', 'critical', 'cout')
)

process.p = cms.Path(
    process.genCandidatesForMET *
    process.genParticlesForMETAllVisible *
    process.genMetTrue *
    process.HLTSusyExoVal
)
process.pEnd = cms.Path(process.dqmSaver)

process.DQMStore.verbose = 0
process.DQM.collectorHost = ''
process.dqmSaver.convention = 'Online'
process.dqmSaver.saveByRun = 1
process.dqmSaver.saveAtJobEnd = True
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C

import ctypes
_EXTENSION_NAME = 'GL_SUN_vertex'


def _f(function):
    return _p.createFunction(function, _p.PLATFORM.GL, 'GL_SUN_vertex',
                             error_checker=_errors._error_checker)


@_f
@_p.types(None, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glColor3fVertex3fSUN(r, g, b, x, y, z): pass

@_f
@_p.types(None, arrays.GLfloatArray, arrays.GLfloatArray)
def glColor3fVertex3fvSUN(c, v): pass

@_f
@_p.types(None, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glColor4fNormal3fVertex3fSUN(r, g, b, a, nx, ny, nz, x, y, z): pass

@_f
@_p.types(None, arrays.GLfloatArray, arrays.GLfloatArray, arrays.GLfloatArray)
def glColor4fNormal3fVertex3fvSUN(c, n, v): pass

@_f
@_p.types(None, _cs.GLubyte, _cs.GLubyte, _cs.GLubyte, _cs.GLubyte, _cs.GLfloat, _cs.GLfloat)
def glColor4ubVertex2fSUN(r, g, b, a, x, y): pass

@_f
@_p.types(None, arrays.GLubyteArray, arrays.GLfloatArray)
def glColor4ubVertex2fvSUN(c, v): pass

@_f
@_p.types(None, _cs.GLubyte, _cs.GLubyte, _cs.GLubyte, _cs.GLubyte, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glColor4ubVertex3fSUN(r, g, b, a, x, y, z): pass

@_f
@_p.types(None, arrays.GLubyteArray, arrays.GLfloatArray)
def glColor4ubVertex3fvSUN(c, v): pass

@_f
@_p.types(None, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glNormal3fVertex3fSUN(nx, ny, nz, x, y, z): pass

@_f
@_p.types(None, arrays.GLfloatArray, arrays.GLfloatArray)
def glNormal3fVertex3fvSUN(n, v): pass

@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glReplacementCodeuiColor3fVertex3fSUN(rc, r, g, b, x, y, z): pass

@_f
@_p.types(None, arrays.GLuintArray, arrays.GLfloatArray, arrays.GLfloatArray)
def glReplacementCodeuiColor3fVertex3fvSUN(rc, c, v): pass

@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glReplacementCodeuiColor4fNormal3fVertex3fSUN(rc, r, g, b, a, nx, ny, nz, x, y, z): pass

@_f
@_p.types(None, arrays.GLuintArray, arrays.GLfloatArray, arrays.GLfloatArray, arrays.GLfloatArray)
def glReplacementCodeuiColor4fNormal3fVertex3fvSUN(rc, c, n, v): pass

@_f
@_p.types(None, _cs.GLuint, _cs.GLubyte, _cs.GLubyte, _cs.GLubyte, _cs.GLubyte, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glReplacementCodeuiColor4ubVertex3fSUN(rc, r, g, b, a, x, y, z): pass

@_f
@_p.types(None, arrays.GLuintArray, arrays.GLubyteArray, arrays.GLfloatArray)
def glReplacementCodeuiColor4ubVertex3fvSUN(rc, c, v): pass

@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glReplacementCodeuiNormal3fVertex3fSUN(rc, nx, ny, nz, x, y, z): pass

@_f
@_p.types(None, arrays.GLuintArray, arrays.GLfloatArray, arrays.GLfloatArray)
def glReplacementCodeuiNormal3fVertex3fvSUN(rc, n, v): pass

@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fSUN(rc, s, t, r, g, b, a, nx, ny, nz, x, y, z): pass

@_f
@_p.types(None, arrays.GLuintArray, arrays.GLfloatArray, arrays.GLfloatArray, arrays.GLfloatArray, arrays.GLfloatArray)
def glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fvSUN(rc, tc, c, n, v): pass

@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glReplacementCodeuiTexCoord2fNormal3fVertex3fSUN(rc, s, t, nx, ny, nz, x, y, z): pass

@_f
@_p.types(None, arrays.GLuintArray, arrays.GLfloatArray, arrays.GLfloatArray, arrays.GLfloatArray)
def glReplacementCodeuiTexCoord2fNormal3fVertex3fvSUN(rc, tc, n, v): pass

@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glReplacementCodeuiTexCoord2fVertex3fSUN(rc, s, t, x, y, z): pass

@_f
@_p.types(None, arrays.GLuintArray, arrays.GLfloatArray, arrays.GLfloatArray)
def glReplacementCodeuiTexCoord2fVertex3fvSUN(rc, tc, v): pass

@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glReplacementCodeuiVertex3fSUN(rc, x, y, z): pass

@_f
@_p.types(None, arrays.GLuintArray, arrays.GLfloatArray)
def glReplacementCodeuiVertex3fvSUN(rc, v): pass

@_f
@_p.types(None, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glTexCoord2fColor3fVertex3fSUN(s, t, r, g, b, x, y, z): pass

@_f
@_p.types(None, arrays.GLfloatArray, arrays.GLfloatArray, arrays.GLfloatArray)
def glTexCoord2fColor3fVertex3fvSUN(tc, c, v): pass

@_f
@_p.types(None, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glTexCoord2fColor4fNormal3fVertex3fSUN(s, t, r, g, b, a, nx, ny, nz, x, y, z): pass

@_f
@_p.types(None, arrays.GLfloatArray, arrays.GLfloatArray, arrays.GLfloatArray, arrays.GLfloatArray)
def glTexCoord2fColor4fNormal3fVertex3fvSUN(tc, c, n, v): pass

@_f
@_p.types(None, _cs.GLfloat, _cs.GLfloat, _cs.GLubyte, _cs.GLubyte, _cs.GLubyte, _cs.GLubyte, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glTexCoord2fColor4ubVertex3fSUN(s, t, r, g, b, a, x, y, z): pass

@_f
@_p.types(None, arrays.GLfloatArray, arrays.GLubyteArray, arrays.GLfloatArray)
def glTexCoord2fColor4ubVertex3fvSUN(tc, c, v): pass

@_f
@_p.types(None, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glTexCoord2fNormal3fVertex3fSUN(s, t, nx, ny, nz, x, y, z): pass

@_f
@_p.types(None, arrays.GLfloatArray, arrays.GLfloatArray, arrays.GLfloatArray)
def glTexCoord2fNormal3fVertex3fvSUN(tc, n, v): pass

@_f
@_p.types(None, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glTexCoord2fVertex3fSUN(s, t, x, y, z): pass

@_f
@_p.types(None, arrays.GLfloatArray, arrays.GLfloatArray)
def glTexCoord2fVertex3fvSUN(tc, v): pass

@_f
@_p.types(None, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glTexCoord4fColor4fNormal3fVertex4fSUN(s, t, p, q, r, g, b, a, nx, ny, nz, x, y, z, w): pass

@_f
@_p.types(None, arrays.GLfloatArray, arrays.GLfloatArray, arrays.GLfloatArray, arrays.GLfloatArray)
def glTexCoord4fColor4fNormal3fVertex4fvSUN(tc, c, n, v): pass

@_f
@_p.types(None, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glTexCoord4fVertex4fSUN(s, t, p, q, x, y, z, w): pass

@_f
@_p.types(None, arrays.GLfloatArray, arrays.GLfloatArray)
def glTexCoord4fVertex4fvSUN(tc, v): pass
# @Time    : 12/4/17 4:28 PM
# @Author  : <NAME>
# Copyright (c) Facebook, Inc. and its affiliates.

# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os

import polymetis

PKG_ROOT_DIR = polymetis.__path__[0]
DATA_DIR = os.path.join(PKG_ROOT_DIR, "data")


def get_full_path_to_urdf(path: str):
    """Gets the absolute path to a relative path of :code:`DATA_DIR`."""
    if not os.path.isabs(path):
        path = os.path.abspath(os.path.join(DATA_DIR, path))
    assert os.path.exists(path), f"Invalid robot_description_path: {path}"
    return path


def which(program: str):
    """Equivalent of `which <https://en.wikipedia.org/wiki/Which_(command)>`_ program.

    Taken from https://stackoverflow.com/a/377028

    Returns equivalent of $(which program), or None if unable to find it.

    Args:
        program: name of the executable to find.
    """

    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    fpath, _fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file

    return None
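# Usage sketch for `which` above (the executable name is illustrative and
# the output depends on the host): like the shell builtin, it returns an
# absolute path when the program is found on PATH, or None otherwise.
print(which("python3"))           # e.g. /usr/bin/python3, or None if absent
print(which("/usr/bin/python3"))  # an absolute path is returned as-is if executable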
# External
from Qt import QtWidgets
from Qt import QtGui
from Qt import QtCore


class PixmapButton(QtWidgets.QAbstractButton):
    """https://stackoverflow.com/questions/2711033/how-code-a-image-button-in-pyqt"""

    def __init__(self, pixmap, pixmap_hover, pixmap_pressed,
                 pixmap_checked=None, pixmap_checked_hover=None,
                 pixmap_checked_pressed=None, size=32,
                 checkable=False, parent=None):
        super(PixmapButton, self).__init__(parent=parent)
        self.pixmap = pixmap
        self.pixmap_hover = pixmap_hover
        self.pixmap_pressed = pixmap_pressed
        self.pixmap_checked = pixmap_checked
        self.pixmap_checked_hover = pixmap_checked_hover
        self.pixmap_checked_pressed = pixmap_checked_pressed
        self.size = size

        if checkable:
            self.setCheckable(checkable)

        self.pressed.connect(self.update)
        self.released.connect(self.update)

        self.action = None

    def set_action(self, action):
        self.action = action

        # get properties
        self.setToolTip(self.action.toolTip())
        self.setWhatsThis(self.action.whatsThis())

        # connect signals
        action.triggered.connect(self.update_state)
        action.toggled.connect(self.update_state)
        if action.isCheckable():
            self.toggled.connect(action.toggle)
        else:
            self.clicked.connect(action.trigger)

    def update_state(self):
        if self.action:
            self.blockSignals(True)
            self.setChecked(self.action.isChecked())
            self.blockSignals(False)

    def paintEvent(self, event):
        if not isinstance(event, QtGui.QPaintEvent):
            return

        if self.isChecked():
            pix = self.pixmap_checked_hover if self.underMouse() else self.pixmap_checked
            if self.isDown():
                pix = self.pixmap_checked_pressed
        else:
            pix = self.pixmap_hover if self.underMouse() else self.pixmap
            if self.isDown():
                pix = self.pixmap_pressed

        painter = QtGui.QPainter(self)
        painter.drawPixmap(event.rect(), pix)
        del painter

    def enterEvent(self, event):
        self.update()

    def leaveEvent(self, event):
        self.update()

    def sizeHint(self):
        return QtCore.QSize(self.size, self.size)
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Processes references for eval (except for tokenization)."""

import json
import os

from absl import app
from absl import flags
from language.totto import table_to_text_utils
import six

FLAGS = flags.FLAGS

flags.DEFINE_string("input_path", None, "Input json file.")
flags.DEFINE_string("output_dir", None, "Output directory.")
flags.DEFINE_string("mode", None, "Either 'dev', or 'test'")


def get_references(json_example, mode="dev"):
    """Get references from json example."""
    multi_reference = []
    for annotation in json_example["sentence_annotations"]:
        final_sentence = annotation["final_sentence"]
        multi_reference.append(final_sentence)

    if mode == "dev" or mode == "test":
        while len(multi_reference) < 3:
            multi_reference.append("<null>")

    if mode == "dev" or mode == "test":
        if json_example["overlap_subset"]:
            multi_overlap_reference = multi_reference
            multi_nonoverlap_reference = None
        else:
            multi_nonoverlap_reference = multi_reference
            multi_overlap_reference = None
    else:
        multi_overlap_reference = None
        multi_nonoverlap_reference = None

    return multi_reference, multi_overlap_reference, multi_nonoverlap_reference


def get_parent_tables(json_example, mode="dev"):
    """Get tables in PARENT format for each json example."""
    table = json_example["table"]
    table_page_title = json_example["table_page_title"]
    table_section_title = json_example["table_section_title"]
    table_section_text = json_example["table_section_text"]
    cell_indices = json_example["highlighted_cells"]
    highlighted_subtable = (
        table_to_text_utils.get_highlighted_subtable(
            table=table, cell_indices=cell_indices))

    # Get PARENT format code.
    table_prec = table_to_text_utils.get_table_parent_format(
        table=table,
        table_page_title=table_page_title,
        table_section_title=table_section_title,
        table_section_text=table_section_text)

    table_rec = table_to_text_utils.get_subtable_parent_format(
        subtable=highlighted_subtable,
        table_page_title=table_page_title,
        table_section_title=table_section_title)

    overlap_table_prec = None
    overlap_table_rec = None
    nonoverlap_table_prec = None
    nonoverlap_table_rec = None
    if mode == "dev" or mode == "test":
        if json_example["overlap_subset"]:
            overlap_table_prec = table_prec
            overlap_table_rec = table_rec
        else:
            nonoverlap_table_prec = table_prec
            nonoverlap_table_rec = table_rec

    return (table_prec, table_rec, overlap_table_prec, overlap_table_rec,
            nonoverlap_table_prec, nonoverlap_table_rec)


def write_references(references, output_path_base):
    """Write single and multiple references to file."""
    # Just write a single reference file for now.
    with open(output_path_base, "w", encoding="utf-8") as f:
        for multi_reference in references:
            f.write(multi_reference[0].lower() + "\n")

    # Write out multireferences.
    if FLAGS.mode == "dev" or FLAGS.mode == "test":
        output_path_multi0 = output_path_base + "-multi0"
        with open(output_path_multi0, "w", encoding="utf-8") as f:
            for multi_reference in references:
                f.write(multi_reference[0].lower() + "\n")

        output_path_multi1 = output_path_base + "-multi1"
        with open(output_path_multi1, "w", encoding="utf-8") as f:
            for multi_reference in references:
                f.write(multi_reference[1].lower() + "\n")

        output_path_multi2 = output_path_base + "-multi2"
        with open(output_path_multi2, "w", encoding="utf-8") as f:
            for multi_reference in references:
                f.write(multi_reference[2].lower() + "\n")


def write_table_parent_format(tables, output_path):
    with open(output_path, "w", encoding="utf-8") as f:
        for table in tables:
            f.write(table.lower() + "\n")


def main(_):
    input_path = FLAGS.input_path
    output_dir = FLAGS.output_dir
    all_references = []
    overlap_references = []
    nonoverlap_references = []
    parent_prec_tables = []
    parent_rec_tables = []
    overlap_parent_prec_tables = []
    overlap_parent_rec_tables = []
    nonoverlap_parent_prec_tables = []
    nonoverlap_parent_rec_tables = []
    with open(input_path, "r", encoding="utf-8") as input_file:
        for line in input_file:
            line = six.ensure_text(line, "utf-8")
            json_example = json.loads(line)
            multi_reference, multi_overlap_reference, multi_nonoverlap_reference = (
                get_references(json_example, FLAGS.mode))
            all_references.append(multi_reference)
            if multi_overlap_reference:
                overlap_references.append(multi_overlap_reference)
            if multi_nonoverlap_reference:
                nonoverlap_references.append(multi_nonoverlap_reference)

            (table_prec, table_rec, overlap_table_prec, overlap_table_rec,
             nonoverlap_table_prec, nonoverlap_table_rec) = (
                 get_parent_tables(json_example, FLAGS.mode))
            parent_prec_tables.append(table_prec)
            parent_rec_tables.append(table_rec)
            if overlap_table_prec and overlap_table_rec:
                overlap_parent_prec_tables.append(overlap_table_prec)
                overlap_parent_rec_tables.append(overlap_table_rec)
            if nonoverlap_table_prec and nonoverlap_table_rec:
                nonoverlap_parent_prec_tables.append(nonoverlap_table_prec)
                nonoverlap_parent_rec_tables.append(nonoverlap_table_rec)

    print("Writing references.")
    all_output_path_base = os.path.join(output_dir, "references")
    overlap_output_path_base = os.path.join(output_dir, "overlap_references")
    nonoverlap_output_path_base = os.path.join(output_dir,
                                               "nonoverlap_references")
    write_references(all_references, all_output_path_base)
    write_references(overlap_references, overlap_output_path_base)
    write_references(nonoverlap_references, nonoverlap_output_path_base)

    print("Writing tables in PARENT format.")
    all_table_prec_path = os.path.join(output_dir,
                                       "tables_parent_precision_format")
    all_table_rec_path = os.path.join(output_dir, "tables_parent_recall_format")
    overlap_table_prec_path = os.path.join(
        output_dir, "overlap_tables_parent_precision_format")
    overlap_table_rec_path = os.path.join(
        output_dir, "overlap_tables_parent_recall_format")
    nonoverlap_table_prec_path = os.path.join(
        output_dir, "nonoverlap_tables_parent_precision_format")
    nonoverlap_table_rec_path = os.path.join(
        output_dir, "nonoverlap_tables_parent_recall_format")

    write_table_parent_format(parent_prec_tables, all_table_prec_path)
    write_table_parent_format(parent_rec_tables, all_table_rec_path)
    write_table_parent_format(overlap_parent_prec_tables,
                              overlap_table_prec_path)
    write_table_parent_format(overlap_parent_rec_tables,
                              overlap_table_rec_path)
    write_table_parent_format(nonoverlap_parent_prec_tables,
                              nonoverlap_table_prec_path)
    write_table_parent_format(nonoverlap_parent_rec_tables,
                              nonoverlap_table_rec_path)


if __name__ == "__main__":
    flags.mark_flags_as_required(["input_path", "output_dir", "mode"])
    app.run(main)
def extractAlpenGlowTranslations(item):
    """
    'Alpen Glow Translations'
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol or frag) or 'preview' in item['title'].lower():
        return None

    tagmap = {
        '<NAME>': 'The Legend of the Concubine\'s Daughter Minglan',
    }

    for tag, sname in tagmap.items():
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, sname, vol, chp, frag=frag)

    return False
from pygears import module
from pygears.sim import delta
from pygears.typing import Queue, Tuple, typeof

from pygears import gear, alternative
from pygears.lib.shred import shred
from .ccat import ccat
from .permute import permuted_apply
from .cat_util import din_data_cat_value

from functools import reduce
from pygears.util.utils import gather


def lvl_if_queue(t):
    if not issubclass(t, Queue):
        return 0
    else:
        return t.lvl


def data_if_queue(t):
    if not issubclass(t, Queue):
        return t
    else:
        return t[0]


def zip_type(dtypes):
    arg_queue_lvl = list(map(lvl_if_queue, dtypes))
    base_type = Tuple[tuple(map(data_if_queue, dtypes))]

    # If there are no Queues, i.e. max(arg_queue_lvl) == 0, the type below
    # will resolve to just base_type
    return Queue[base_type, max(arg_queue_lvl)]


@gear
async def zip_cat(*din) -> b'zip_type(din)':
    id_max_lvl, max_lvl = max(
        enumerate(din),
        key=lambda p: p[1].dtype.lvl if typeof(p[1].dtype, Queue) else 0)

    async with gather(*din) as dout:
        yield (din_data_cat_value(dout), dout[id_max_lvl].eot)


def isort(iterable, key=lambda x: x, reverse=False):
    res = sorted(enumerate(iterable), key=key, reverse=reverse)
    values = tuple(d[1] for d in res)
    indices = tuple(d[0] for d in res)
    return values, indices


@gear
def czip2(a, b) -> b'zip_type((a, b))':
    return (a, b) | zip_sync(outsync=False) | zip_cat


@gear
def czip(*din):
    if len(din) == 2:
        return czip2(*din)

    # Sort input interfaces in descending order of their Queue levels, i.e. we
    # want to zip highest Queue levels first in order to synchronize them first
    din_sorted_by_lvl, din_sort_indices = isort(
        din, key=lambda x: lvl_if_queue(x[1].dtype), reverse=True)

    # Zip din's in sorted order using czip2 as a binary operation. This will
    # produce nested Tuple's, hence we cast it to a Queue of single Tuple
    ret_flat_type = zip_type([d.dtype for d in din_sorted_by_lvl])

    def czip_cascade(*din):
        return reduce(czip, din) >> ret_flat_type

    return permuted_apply(*din, f=czip_cascade, indices=din_sort_indices)


@gear
def unzip(din, *, dtypes):
    zdata, zlast = din

    def split():
        for i, d in enumerate(dtypes):
            data = zdata[i]

            if issubclass(d, Queue):
                yield ccat(data, zlast[:d.lvl]) | Queue[data.dtype, d.lvl]
            else:
                yield data

    return tuple(split())


@gear(enablement=b'len(din) == 2')
async def zip_sync(*din, outsync=True) -> b'din':
    lvls = tuple(d.dtype.lvl if typeof(d.dtype, Queue) else 0 for d in din)
    overlap_lvl = min(lvls)

    eot_aligned = (1, 1)

    while (1):
        din_data = [(await d.pull()) for d in din]

        if overlap_lvl > 0:
            eot_overlap = [d.eot[:overlap_lvl] for d in din_data]

            eot_aligned = (eot_overlap[0] >= eot_overlap[1],
                           eot_overlap[1] >= eot_overlap[0])
        else:
            eot_aligned = (1, 1)
            eot_overlap = din_data[0].eot if lvls[0] else din_data[1].eot

        if all(eot_aligned):
            yield din_data
        else:
            await delta()

        for d, aligned in zip(din, eot_aligned):
            if (not aligned) or all(eot_aligned):
                d.ack()


@alternative(zip_sync)
@gear(enablement=b'len(din) > 2')
def zip_sync_vararg(*din):
    return din | czip | unzip(dtypes=[d.dtype for d in din])


@gear
def zip_sync_with(sync_in, din, *, balance=None):
    if balance:
        sync_in = sync_in | balance

    din_sync, sync_in_sync = zip_sync(din, sync_in)
    sync_in_sync | shred

    return din_sync


@gear
def zip_wrap_with(sync, din):
    din_zip = czip(sync, din)
    return ccat(din_zip['data'][1], din_zip['eot']) | Queue
from random import choices, randint
from string import ascii_letters, digits

account_chars: str = digits + ascii_letters


def _random_account_id() -> str:
    """Return a random account number made of 12 characters."""
    return "".join(choices(account_chars, k=12))


def _random_amount() -> float:
    """Return a random amount between 1.00 and 10000.00."""
    return randint(100, 1000000) / 100


def create_random_transaction() -> dict:
    """Create a fake randomised transaction."""
    return {
        "source": _random_account_id(),
        "target": _random_account_id(),
        "amount": _random_amount(),
        "currency": "EUR",
    }
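# Example of the generator above in action; the assertions just restate the
# invariants implied by the helpers (12-character account IDs, an amount in
# [1.00, 10000.00] from randint(100, 1000000) / 100, and a fixed currency).
txn = create_random_transaction()
assert len(txn["source"]) == 12 and len(txn["target"]) == 12
assert 1.00 <= txn["amount"] <= 10000.00
assert txn["currency"] == "EUR"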
# table_base.py - Key classes and methods for pretty print text table.

# Copyright (C) 2012 Free Software Foundation, Inc.

# Author: <NAME>
# Package: SublimeTableEditor
# Homepage: https://github.com/vkocubinsky/SublimeTableEditor

# This file is part of SublimeTableEditor.

# SublimeTableEditor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# SublimeTableEditor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with SublimeTableEditor. If not, see <http://www.gnu.org/licenses/>.

from __future__ import print_function
from __future__ import division

import math
import re
import csv

try:
    from . import table_line_parser as tparser
    from .widechar_support import wlen, wcount
except ValueError:
    import table_line_parser as tparser
    from widechar_support import wlen, wcount


class TableConfiguration:

    def __init__(self):
        self.keep_space_left = False
        self.align_number_right = True
        self.detect_header = True
        self.intelligent_formatting = True

        # only for simple syntax
        self.hline_out_border = None
        self.hline_in_border = None
        self.custom_column_alignment = True


class TableSyntax:

    def __init__(self, name, table_configuration):
        self.name = name
        self.table_configuration = table_configuration or TableConfiguration()

        self.align_number_right = self.table_configuration.align_number_right
        self.detect_header = self.table_configuration.detect_header
        self.keep_space_left = self.table_configuration.keep_space_left
        self.intelligent_formatting = self.table_configuration.intelligent_formatting

        self.line_parser = tparser.LineParserPlus("(?:[|])")
        # Must be set in subclass constructor
        self.table_parser = None
        self.table_driver = None


class Column(object):
    ALIGN_LEFT = 'left'
    ALIGN_RIGHT = 'right'
    ALIGN_CENTER = 'center'

    def __init__(self, row):
        self.row = row
        self.table = row.table
        self.syntax = row.table.syntax
        self.col_len = 0
        self.align = None
        self.header = None
        self.colspan = 1
        self.rowspan = 1
        self.pseudo_columns = []
        self.left_border_text = '|'
        self.right_border_text = '|'

    def min_len(self):
        raise NotImplementedError

    def render(self):
        raise NotImplementedError

    def align_follow(self):
        return None

    def pseudo(self):
        return False


class PseudoColumn(Column):

    def __init__(self, row, master_column):
        Column.__init__(self, row)
        self.master_column = master_column
        self.data = ''

    def render(self):
        return ''

    def min_len(self):
        return self.master_column.min_len()

    def pseudo(self):
        return True


class Row:

    def __init__(self, table):
        self.table = table
        self.syntax = table.syntax
        self.columns = []

    def __getitem__(self, index):
        return self.columns[index]

    def __len__(self):
        return len(self.columns)

    def is_header_separator(self):
        return False

    def is_separator(self):
        return False

    def is_data(self):
        return False

    def is_align(self):
        return False

    def append(self, column):
        self.columns.append(column)
        for i in range(0, column.colspan - 1):
            pseudo_column = PseudoColumn(self, column)
            column.pseudo_columns.append(pseudo_column)
            self.columns.append(pseudo_column)

    def new_empty_column(self):
        raise NotImplementedError

    def create_column(self, text):
        raise NotImplementedError

    def render(self):
        r = ""
        for ind, column in enumerate(self.columns):
            if column.pseudo():
                continue
            if ind == 0:
                r += self.convert_border(column.left_border_text)
            r += column.render()
            r += self.convert_border(column.right_border_text)
        return r

    def convert_border(self, border_text):
        # if separator converts to data
        return border_text.replace('+', '|')


class DataRow(Row):

    def new_empty_column(self):
        return DataColumn(self, '')

    def create_column(self, text):
        return DataColumn(self, text)

    def is_data(self):
        return True


class DataColumn(Column):

    def __init__(self, row, data):
        Column.__init__(self, row)
        self.data = data
        self.left_space = ' '
        self.right_space = ' '

    def _norm(self):
        if self.syntax.keep_space_left:
            if self.header:
                norm = self.data.strip()
            else:
                norm = self.data.rstrip()
                if norm[:1] == ' ':
                    norm = norm[1:]
        else:
            norm = self.data.strip()
        return norm

    def min_len(self):
        return int(math.ceil(self.total_min_len() / self.colspan))

    def total_min_len(self):
        # min of '   ' or ' xxxx '
        space_len = len(self.left_space) + len(self.right_space)
        total_min_len = max(space_len + 1, wlen(self._norm()) + space_len)
        total_min_len = (total_min_len
                         + (len(self.left_border_text) - 1)
                         + (len(self.right_border_text) - 1))
        return total_min_len

    def render(self):
        # colspan -1 is count of '|'
        total_col_len = (self.col_len
                         + (self.colspan - 1)
                         + sum([col.col_len for col in self.pseudo_columns]))
        #if self.syntax.multi_markdown_syntax():
        #    total_col_len = total_col_len - (self.colspan - 1)
        total_col_len = (total_col_len
                         # left border already calculated
                         # - (len(self.left_border_text) - 1)
                         - (len(self.right_border_text) - 1))

        norm = self._norm()
        space_len = len(self.left_space) + len(self.right_space)

        total_align_len = total_col_len - wcount(norm)
        if self.header and self.syntax.detect_header:
            align_value = norm.center(total_align_len - space_len, ' ')
        elif self.align == Column.ALIGN_RIGHT:
            align_value = norm.rjust(total_align_len - space_len, ' ')
        elif self.align == Column.ALIGN_CENTER:
            align_value = norm.center(total_align_len - space_len, ' ')
        else:
            align_value = norm.ljust(total_align_len - space_len, ' ')
        return self.left_space + align_value + self.right_space


def check_condition(condition, message):
    if not condition:
        raise TableException(message)


class TextTable:

    def __init__(self, syntax):
        self.syntax = syntax
        self.prefix = ""
        self.rows = []
        self.pack()

    def __len__(self):
        return len(self.rows)

    def empty(self):
        return len(self.rows) == 0

    def __getitem__(self, index):
        return self.rows[index]

    def _max_column_count(self):
        return max([len(row) for row in self.rows])

    def _rstrip(self):
        if len(self.rows) <= 1:
            return
        max_column_count = self._max_column_count()
        long_lines_count = 0
        long_line_ind = 0
        for row_ind, row in enumerate(self.rows):
            if len(row) == max_column_count:
                long_lines_count += 1
                long_line_ind = row_ind

        if long_lines_count == 1:
            row = self.rows[long_line_ind]
            overspans = sum([column.colspan - 1 for column in row.columns])
            if row.is_data() and overspans > 0:
                shift = 0
                for shift, column in enumerate(row[::-1]):
                    if column.pseudo() or len(column.data.strip()) > 0:
                        break
                if shift > 0:
                    if len(self.rows) == 2:
                        if shift != overspans:
                            return
                    row.columns = row.columns[:-shift]

    def pack(self):
        if len(self.rows) == 0:
            return

        column_count = self._max_column_count()

        if column_count == 0:
            self.rows = []
            return

        # intelligent formatting
        if self.syntax.intelligent_formatting:
            self._rstrip()
            column_count = self._max_column_count()

        # adjust/extend column count
        rowspans = [0] * column_count
        for row in self.rows:
            overcols = sum([rowspan for rowspan in rowspans if rowspan > 0])

            diff_count = column_count - len(row) - overcols
            for i in range(diff_count):
                row.columns.append(row.new_empty_column())
            if len(row) == 0:
                row.columns.append(row.new_empty_column())

            # prepare rowspans for next row
            for col_ind, rowspan in enumerate(rowspans):
                if rowspan > 0:
                    rowspans[col_ind] = rowspans[col_ind] - 1

            for col_ind, column in enumerate(row.columns):
                rowspans[col_ind] = rowspans[col_ind] + column.rowspan - 1

        # calculate column lens
        col_lens = [0] * column_count
        for row in self.rows:
            for col_ind, column in enumerate(row.columns):
                col_lens[col_ind] = max(col_lens[col_ind], column.min_len())

        # set column len
        for row in self.rows:
            for column, col_len in zip(row.columns, col_lens):
                column.col_len = col_len

        # header
        header_separator_index = -1
        first_data_index = -1
        if self.syntax.detect_header:
            for row_ind, row in enumerate(self.rows):
                if first_data_index == -1 and row.is_data():
                    first_data_index = row_ind
                if (first_data_index != -1 and header_separator_index == -1
                        and row.is_header_separator()):
                    header_separator_index = row_ind

                    for header_index in range(first_data_index,
                                              header_separator_index):
                        if self.rows[header_index].is_data():
                            for column in self.rows[header_index].columns:
                                column.header = True

        # set column alignment
        data_alignment = [None] * len(col_lens)
        for row_ind, row in enumerate(self.rows):
            if row_ind < header_separator_index:
                if row.is_align():
                    for col_ind, column in enumerate(row.columns):
                        data_alignment[col_ind] = column.align_follow()
                continue
            elif row.is_align():
                for col_ind, column in enumerate(row.columns):
                    data_alignment[col_ind] = column.align_follow()
            elif row.is_data():
                for col_ind, column in enumerate(row.columns):
                    if data_alignment[col_ind] is None:
                        if (self.syntax.align_number_right
                                and self._is_number_column(row_ind, col_ind)):
                            data_alignment[col_ind] = Column.ALIGN_RIGHT
                        else:
                            data_alignment[col_ind] = Column.ALIGN_LEFT
                    column.align = data_alignment[col_ind]

    def _is_number_column(self, start_row_ind, col_ind):
        assert self.rows[start_row_ind].is_data()
        for row in self.rows[start_row_ind:]:
            if (row.is_data()
                    and col_ind < len(row.columns)
                    and len(row.columns[col_ind].data.strip()) > 0):
                try:
                    float(row.columns[col_ind].data)
                except ValueError:
                    return False
        return True

    def render_lines(self):
        return [self.prefix + row.render() for row in self.rows]

    def render(self):
        return "\n".join(self.render_lines())

    def is_col_colspan(self, col):
        for row in self.rows:
            if col < len(row):
                if row[col].pseudo() or row[col].colspan > 1:
                    return True
        return False

    def is_row_colspan(self, row):
        for column in self[row].columns:
            if column.pseudo() or column.colspan > 1:
                return True
        return False

    def assert_not_col_colspan(self, col):
        check_condition(self.is_col_colspan(col) is False,
                        "Expected not colspan column, but column {0}"
                        " is colspan".format(col))

    def delete_column(self, col):
        self.assert_not_col_colspan(col)
        for row in self.rows:
            if col < len(row):
                del row.columns[col]
        self.pack()

    def swap_columns(self, i, j):
        self.assert_not_col_colspan(i)
        self.assert_not_col_colspan(j)
        for row in self.rows:
            if i < len(row) and j < len(row):
                row.columns[i], row.columns[j] = row.columns[j], row.columns[i]
        self.pack()

    def delete_row(self, i):
        assert 0 <= i < len(self.rows)
        del self.rows[i]
        self.pack()

    def swap_rows(self, i, j):
        check_condition((0 <= i < len(self.rows)
                         and 0 <= j < len(self.rows)),
                        "Index out of range")
        self.rows[i], self.rows[j] = self.rows[j], self.rows[i]
        for column in self.rows[i].columns:
            column.header = False
        for column in self.rows[j].columns:
            column.header = False
        self.pack()

    def insert_empty_row(self, i):
        check_condition(i >= 0, "Index should be more than zero")
        self.rows.insert(i, DataRow(self))
        self.pack()

    def insert_empty_column(self, i):
        check_condition(i >= 0, "Index should be more than zero")
        self.assert_not_col_colspan(i)
        for row in self.rows:
            row.columns.insert(i, row.new_empty_column())
        self.pack()


class TableException(Exception):

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class TablePos:

    def __init__(self, row_num, field_num):
        self.row_num = row_num
        self.field_num = field_num

    def __repr__(self):
        return "TablePos({self.row_num}, {self.field_num})".format(self=self)

    def __str__(self):
        return self.__repr__()

    def __eq__(self, other):
        return (self.row_num == other.row_num
                and self.field_num == other.field_num)


class TableDriver:

    def __init__(self, syntax):
        self.syntax = syntax

    def visual_column_count(self, table, row_ind):
        return sum([1 for column in table[row_ind].columns
                    if not column.pseudo()])

    def internal_to_visual_index(self, table, internal_pos):
        visual_pos = TablePos(internal_pos.row_num, internal_pos.field_num)
        for col_ind in range(internal_pos.field_num + 1):
            if table[internal_pos.row_num][col_ind].pseudo():
                visual_pos.field_num -= 1
        return visual_pos

    def visual_to_internal_index(self, table, visual_pos):
        internal_pos = TablePos(visual_pos.row_num, 0)
        count_visual = 0
        internal_pos.field_num = 0
        for col_ind in range(len(table[visual_pos.row_num])):
            if not table[visual_pos.row_num][col_ind].pseudo():
                count_visual += 1
                internal_pos.field_num = col_ind
            if count_visual == visual_pos.field_num + 1:
                break
        else:
            print("WARNING: Visual Index Not found")
        return internal_pos

    def get_cursor(self, table, visual_pos):
        #
        # ' |  1 |  2  |  3_| 4 |'
        internal_pos = self.visual_to_internal_index(table, visual_pos)
        base_len = (len(table.prefix)
                    + sum([column.col_len - wcount(column.render())
                           for column, ind
                           in zip(table[visual_pos.row_num].columns,
                                  range(internal_pos.field_num))])
                    + internal_pos.field_num + 1  # count of '|'
                    )
        text = table[internal_pos.row_num][internal_pos.field_num].render()
        match = re.search(r"([^\s])\s*$", text)
        if match:
            col_pos = match.end(1)
        else:
            col_pos = 1
        return base_len + col_pos

    def editor_move_column_left(self, table, table_pos):
        internal_pos = self.visual_to_internal_index(table, table_pos)
        field_num = internal_pos.field_num
        if field_num > 0:
            if (table.is_col_colspan(field_num)
                    or table.is_col_colspan(field_num - 1)):
                raise TableException("Move Column Left is not "
                                     "permitted for colspan column")
            else:
                table.swap_columns(field_num, field_num - 1)
                return ("Column moved to left",
                        TablePos(table_pos.row_num, table_pos.field_num - 1))
        else:
            raise TableException("Move Column Left doesn't "
                                 "make sense for the first column in the "
                                 "table.")

    def editor_move_column_right(self, table, table_pos):
        internal_pos = self.visual_to_internal_index(table, table_pos)
        field_num = internal_pos.field_num
        if field_num < len(table[table_pos.row_num]) - 1:
            if (table.is_col_colspan(field_num)
                    or table.is_col_colspan(field_num + 1)):
                raise TableException("Move Column Right is not "
                                     "permitted for colspan column")
            else:
                table.swap_columns(field_num, field_num + 1)
                return ("Column moved to right",
                        TablePos(table_pos.row_num, table_pos.field_num + 1))
        else:
            raise TableException("Move Column Right doesn't "
                                 "make sense for the last column in the "
                                 "table.")

    def editor_move_row_up(self, table, table_pos):
        if table_pos.row_num > 0:
            table.swap_rows(table_pos.row_num, table_pos.row_num - 1)
            return ("Row moved up",
                    TablePos(table_pos.row_num - 1, table_pos.field_num))
        else:
            raise TableException("Move Row Up doesn't make sense for the "
                                 "first row in the table")

    def editor_move_row_down(self, table, table_pos):
        if table_pos.row_num + 1 < len(table):
            table.swap_rows(table_pos.row_num, table_pos.row_num + 1)
            return ("Row moved down",
                    TablePos(table_pos.row_num + 1, table_pos.field_num))
        else:
            raise TableException("Move Row Down doesn't make sense for the "
                                 "last row in the table")

    def editor_next_row(self, table, table_pos):
        if table_pos.row_num + 1 < len(table):
            if table[table_pos.row_num + 1].is_header_separator():
                table.insert_empty_row(table_pos.row_num + 1)
        else:
            table.insert_empty_row(len(table))
        return ("Moved to next row",
                TablePos(table_pos.row_num + 1, table_pos.field_num))

    def editor_delete_column(self, table, table_pos):
        internal_pos = self.visual_to_internal_index(table, table_pos)
        field_num = internal_pos.field_num
        if table.is_col_colspan(field_num):
            raise TableException("Delete column is not permitted for "
                                 "colspan column")
        else:
            table.delete_column(field_num)
            new_table_pos = TablePos(table_pos.row_num, table_pos.field_num)
            if (not table.empty()
                    and table_pos.field_num == len(table[table_pos.row_num])):
                new_table_pos.field_num = new_table_pos.field_num - 1
            return ("Column deleted", new_table_pos)

    def editor_insert_column(self, table, table_pos):
        internal_pos = self.visual_to_internal_index(table, table_pos)
        field_num = internal_pos.field_num
        if table.is_col_colspan(field_num):
            raise TableException("Insert column is not permitted for "
                                 "colspan column")
        else:
            table.insert_empty_column(field_num)
            return ("Column inserted",
                    TablePos(table_pos.row_num, table_pos.field_num))

    def editor_kill_row(self, table, table_pos):
        table.delete_row(table_pos.row_num)
        new_table_pos = TablePos(table_pos.row_num, table_pos.field_num)
        if table_pos.row_num == len(table):
            new_table_pos.row_num = new_table_pos.row_num - 1
        return ("Row deleted", new_table_pos)

    def editor_insert_row(self, table, table_pos):
        table.insert_empty_row(table_pos.row_num)
        return ("Row inserted",
                TablePos(table_pos.row_num, table_pos.field_num))

    def editor_insert_single_hline(self, table, table_pos):
        raise TableException("Syntax {0} doesn't support insert single line"
                             .format(self.syntax.name))

    def editor_insert_double_hline(self, table, table_pos):
        raise TableException("Syntax {0} doesn't support insert double line"
                             .format(self.syntax.name))

    def editor_insert_hline_and_move(self, table, table_pos):
        raise TableException("Syntax {0} doesn't support insert single line "
                             "and move".format(self.syntax.name))

    def editor_align(self, table, table_pos):
        return ("Table aligned",
                TablePos(table_pos.row_num, table_pos.field_num))

    def editor_join_lines(self, table, table_pos):
        if (table_pos.row_num + 1 < len(table)
                and table[table_pos.row_num].is_data()
                and table[table_pos.row_num + 1].is_data()
                and not table.is_row_colspan(table_pos.row_num)
                and not table.is_row_colspan(table_pos.row_num + 1)):

            for curr_col, next_col in zip(table[table_pos.row_num].columns,
                                          table[table_pos.row_num + 1].columns):
                curr_col.data = curr_col.data.strip() + " " + next_col.data.strip()

            table.delete_row(table_pos.row_num + 1)
            return ("Rows joined",
                    TablePos(table_pos.row_num, table_pos.field_num))
        else:
            raise TableException("Join columns is not permitted")

    def editor_next_field(self, table, table_pos):
        pos = TablePos(table_pos.row_num, table_pos.field_num)

        moved = False
        while True:
            if table[pos.row_num].is_separator():
                if pos.row_num + 1 < len(table):
                    pos.field_num = 0
                    pos.row_num += 1
                    moved = True
                    continue
                else:
                    # sel_row == last_table_row
                    table.insert_empty_row(len(table))
                    pos.field_num = 0
                    pos.row_num += 1
                    break
            elif moved:
                break
            elif pos.field_num + 1 < self.visual_column_count(table,
                                                              pos.row_num):
                pos.field_num += 1
                break
            elif pos.row_num + 1 < len(table):
                pos.field_num = 0
                pos.row_num += 1
                moved = True
                continue
            else:
                # sel_row == last_table_row
                table.insert_empty_row(len(table))
                pos.field_num = 0
                pos.row_num += 1
                break
        return ("Cursor position changed", pos)

    def editor_previous_field(self, table, table_pos):
        pos = TablePos(table_pos.row_num, table_pos.field_num)

        moved = False
        while True:
            if table[pos.row_num].is_separator():
                if pos.row_num > 0:
                    pos.row_num -= 1
                    pos.field_num = self.visual_column_count(table,
                                                             pos.row_num) - 1
                    moved = True
                    continue
                else:
                    # row_num == 0
                    pos.field_num = 0
                    break
            elif moved:
                break
            elif pos.field_num > 0:
                pos.field_num -= 1
                break
            elif pos.row_num > 0:
                pos.row_num -= 1
                pos.field_num = self.visual_column_count(table,
                                                         pos.row_num) - 1
                moved = True
                continue
            else:
                # row_num == 0
                break
        return ("Cursor position changed", pos)

    def parse_csv(self, text):
        try:
            table = TextTable(self.syntax)
            dialect = csv.Sniffer().sniff(text)
            table_reader = csv.reader(text.splitlines(), dialect)
            for cols in table_reader:
                row = DataRow(table)
                for col in cols:
                    row.columns.append(DataColumn(row, col))
                table.rows.append(row)
        except csv.Error:
            table = TextTable(self.syntax)
            for line in text.splitlines():
                row = DataRow(table)
                row.columns.append(DataColumn(row, line))
                table.rows.append(row)
        table.pack()
        return table


class BaseTableParser:

    def __init__(self, syntax):
        self.syntax = syntax

    def parse_row(self, table, line):
        row = self.create_row(table, line)

        for line_cell in line.cells:
            column = self.create_column(table, row, line_cell)
            row.append(column)
        return row

    def create_row(self, table, line):
        raise NotImplementedError

    def create_column(self, table, row, line_cell):
        column = row.create_column(line_cell.text)
        column.left_border_text = line_cell.left_border_text
        column.right_border_text = line_cell.right_border_text
        return column

    def is_table_row(self, row):
        return re.match(r"^\s*[|+]", row) is not None

    def parse_text(self, text):
        table = TextTable(self.syntax)
        lines = text.splitlines()
        for ind, line in enumerate(lines):
            line = self.syntax.line_parser.parse(line)
            if ind == 0:
                table.prefix = line.prefix
            row = self.parse_row(table, line)
            table.rows.append(row)
        table.pack()
        return table
<import_stmt>torch<as>th<import_from_stmt>graphop *<import_from_stmt>torch.autograd Function<import_from_stmt>part_csr partition_csr<line_sep>chunk_size=32<class_stmt>SparseSoftmax(Function)<block_start>@staticmethod<def_stmt>forward ctx row indptr eid x<block_start>y=sparse_softmax_forward(row indptr eid x)<line_sep>ctx.save_for_backward(row indptr eid y)<line_sep><return>y<block_end>@staticmethod<def_stmt>backward ctx dy<block_start>row,indptr,eid,y=ctx.saved_tensors<line_sep><return><none> <none> <none> sparse_softmax_backward(row indptr eid y dy)<block_end><block_end><class_stmt>MaskedMMCSR(Function)<block_start>@staticmethod<def_stmt>forward ctx row indptr_r eid_r indices_r col indptr_c eid_c indices_c A B<block_start>ctx.save_for_backward(row indptr_r eid_r indices_r col indptr_c eid_c indices_c A B)<line_sep><return>maskedmm_csr_forward(row indptr_r eid_r indices_r A B)<block_end>@staticmethod<def_stmt>backward ctx grad<block_start>row,indptr_r,eid_r,indices_r,col,indptr_c,eid_c,indices_c,A,B=ctx.saved_tensors<line_sep>dA,dB=maskedmm_csr_backward(row indptr_r eid_r indices_r col indptr_c eid_c indices_c A B grad)<line_sep><return><none> <none> <none> <none> <none> <none> <none> <none> dA dB<block_end><block_end><class_stmt>NodeMulEdge(Function)<block_start>@staticmethod<def_stmt>forward ctx row indptr eid A B<block_start>ctx.save_for_backward(row indptr eid A B)<line_sep><return>node_mul_edge_forward(row indptr eid A B)<block_end>@staticmethod<def_stmt>backward ctx grad<block_start>row,indptr,eid,A,B=ctx.saved_tensors<line_sep>dA,dB=node_mul_edge_backward(row indptr eid A B grad)<line_sep><return><none> <none> <none> dA dB<block_end><block_end><class_stmt>VectorSPMM(Function)<block_start>@staticmethod<def_stmt>forward ctx row indptr eid indices col ptr_t eid_t indices_t edata x<block_start>y=vector_spmm_forward(row indptr eid indices edata x)<line_sep>ctx.save_for_backward(row indptr eid indices col ptr_t eid_t indices_t edata x)<line_sep><return>y<block_end>@staticmethod<def_stmt>backward ctx dy<block_start>row,indptr,eid,indices,col,ptr_t,eid_t,indices_t,edata,x=ctx.saved_tensors<line_sep>dedata,dx=vector_spmm_backward(row indptr eid indices col ptr_t eid_t indices_t edata dy x)<line_sep><return><none> <none> <none> <none> <none> <none> <none> <none> dedata dx<block_end><block_end><class_stmt>MaskedMMSimple(Function)<block_start>@staticmethod<def_stmt>forward ctx inc_x inc_y A B<block_start><with_stmt>th.no_grad()<block_start>A_e=th.sparse.mm(inc_x.float() A)# shape: (e, d) B_e=th.sparse.mm(inc_y.float() B)# shape: (e, d) ctx.save_for_backward(A_e B_e inc_x inc_y)<line_sep>y=(A_e<times>B_e).sum(-1)# shape: (e) <block_end><assert_stmt>y.requires_grad<eq><false><line_sep><return>y<block_end>@staticmethod<def_stmt>backward ctx grad# shape: (e) <block_start>A_e,B_e,inc_x,inc_y=ctx.saved_tensors<line_sep>dAe=grad.unsqueeze(-1)<times>B_e<line_sep>dBe=grad.unsqueeze(-1)<times>A_e<line_sep>dA=th.sparse.mm(inc_x.t().float() dAe)<line_sep>dB=th.sparse.mm(inc_y.t().float() dBe)<line_sep><return><none> <none> dA dB<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>os<line_sep>batch_size=512<line_sep>l=30<line_sep>n=batch_size<times>l<line_sep>e=batch_size<times>(l<power>2)<line_sep>v=th.ones(e dtype=th.uint8)<if_stmt><not>os.path.exists('i.pt')<block_start>i=th.zeros(2 e dtype=th.long)<line_sep>eid_r=th.zeros(e dtype=th.long)<line_sep>eid_c=th.zeros(e dtype=th.long)<line_sep>indptr_r=th.zeros(n+1 dtype=th.long)<line_sep>indptr_c=th.zeros(n+1 
dtype=th.long)<line_sep>indices_r=th.zeros(e dtype=th.long)<line_sep>indices_c=th.zeros(e dtype=th.long)<line_sep>cnt=0<for_stmt>b range(batch_size)<block_start><for_stmt>x range(b<times>l (b+1)<times>l)<block_start>indptr_r[x]=cnt<for_stmt>y range(b<times>l (b+1)<times>l)<block_start>i[0 cnt]=x<line_sep>i[1 cnt]=y<line_sep>indices_r[cnt]=y<line_sep>eid_r[cnt]=cnt<line_sep>cnt<augadd>1<block_end><block_end><block_end>indptr_r[n]=cnt<line_sep>cnt=0<for_stmt>b range(batch_size)<block_start><for_stmt>y range(b<times>l (b+1)<times>l)<block_start>indptr_c[y]=cnt<for_stmt>x range(b<times>l (b+1)<times>l)<block_start>indices_c[cnt]=x<line_sep>eid_c[cnt]=b<times>l<times>l+(x%l)<times>l+(y%l)<line_sep>cnt<augadd>1<block_end><block_end><block_end>indptr_c[n]=cnt<line_sep>th.save((i eid_r eid_c indptr_r indptr_c indices_r indices_c) 'i.pt')<block_end><else_stmt><block_start>i,eid_r,eid_c,indptr_r,indptr_c,indices_r,indices_c=th.load('i.pt')<block_end>adj=th.sparse.ByteTensor(i v th.Size([n n]))<line_sep>adj_1=th.sparse.FloatTensor(i th.rand(e) th.Size([n n])).cuda(0).coalesce()<line_sep>adj_1.requires_grad_(<true>)<if_stmt><not>os.path.exists('ixy.pt')<block_start>i_x=th.zeros(2 e dtype=th.long)<line_sep>i_y=th.zeros(2 e dtype=th.long)<line_sep>cnt=0<for_stmt>b range(batch_size)<block_start><for_stmt>x range(b<times>l (b+1)<times>l)<block_start><for_stmt>y range(b<times>l (b+1)<times>l)<block_start>i_x[0 cnt]=cnt<line_sep>i_x[1 cnt]=x<line_sep>i_y[0 cnt]=cnt<line_sep>i_y[1 cnt]=y<line_sep>cnt<augadd>1<block_end><block_end><block_end>th.save((i_x i_y) 'ixy.pt')<block_end><else_stmt><block_start>i_x,i_y=th.load('ixy.pt')<block_end>inc_x=th.sparse.ByteTensor(i_x v th.Size([e n]))<line_sep>inc_y=th.sparse.ByteTensor(i_y v th.Size([e n]))<import_stmt>time<line_sep>inc_x=inc_x.cuda(0)<line_sep>inc_y=inc_y.cuda(0)<line_sep>adj=adj.cuda(0)<line_sep>eid_r,eid_c,indptr_r,indptr_c,indices_r,indices_c=eid_r.cuda(0) eid_c.cuda(0) indptr_r.cuda(0) indptr_c.cuda(0) indices_r.cuda(0) indices_c.cuda(0)<line_sep>th.cuda.synchronize()<line_sep>print('Single Head (batch size: 512, length: 30, dim: 1024)\n===========================================')<line_sep>print('MaskedNN(src_dot_dst)\nsimple implementation(copy to edge)')<line_sep>dim=1024<line_sep>A=th.rand(n dim requires_grad=<true> device='cuda:0')<line_sep>B=th.rand(n dim requires_grad=<true> device='cuda:0')<line_sep>grad=th.rand(e device='cuda:0')<line_sep>tic=time.time()<line_sep>A_e=th.sparse.mm(inc_x.float() A)<line_sep>B_e=th.sparse.mm(inc_y.float() B)<line_sep>y=(A_e<times>B_e).sum(-1)<line_sep>y_ori=y.clone()<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<line_sep>A_grad_ori,B_grad_ori=A.grad.clone() B.grad.clone()<line_sep>A.grad.zero_()<line_sep>B.grad.zero_()<line_sep>print('simple implementation, hand-crafted autograd')<line_sep>tic=time.time()<line_sep>y=MaskedMMSimple.apply(inc_x inc_y A B)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y y_ori)<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(A.grad A_grad_ori)<and>th.allclose(B.grad B_grad_ori)<line_sep>A.grad.zero_()<line_sep>B.grad.zero_()<line_sep>print('vanilla
bmm')<line_sep>tic=time.time()<line_sep>y=(A.view(batch_size l dim)@B.view(batch_size l dim).transpose(-1 -2)).view(-1)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y y_ori)<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(A.grad A_grad_ori)<and>th.allclose(B.grad B_grad_ori)<line_sep>A.grad.zero_()<line_sep>B.grad.zero_()<line_sep>print('custom kernel(csr)')<line_sep>ROW,INDPTR_R=partition_csr(indptr_r chunk_size=chunk_size)<line_sep>COL,INDPTR_C=partition_csr(indptr_c chunk_size=chunk_size)<line_sep>tic=time.time()<line_sep>y=MaskedMMCSR.apply(ROW INDPTR_R eid_r indices_r COL INDPTR_C eid_c indices_c A B)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y y_ori)<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(A.grad A_grad_ori)<and>th.allclose(B.grad B_grad_ori)<line_sep># ------------------------------------------------------------------------ # Test sparse softmax # ------------------------------------------------------------------------ print('------------------------------------')<line_sep>print('vanilla softmax(scatter)')<line_sep>tic=time.time()<line_sep>x=th.rand(e requires_grad=<true> device='cuda:0')<line_sep>y=th.softmax(x.view(batch_size l l) -1).view(-1)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<line_sep>tic=time.time()<line_sep>y_ori=y.clone()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<line_sep>x_grad_ori=x.grad.clone()<line_sep>x.grad.zero_()<line_sep>print('custom softmax(scatter)')<line_sep>tic=time.time()<line_sep>y=SparseSoftmax.apply(ROW INDPTR_R eid_r x)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y_ori y)<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(x_grad_ori x.grad rtol=1e-3 atol=1e-6)<line_sep>x.grad.zero_()<line_sep>print('vanilla softmax(gather)')<line_sep>tic=time.time()<line_sep>x=th.rand(e requires_grad=<true> device='cuda:0')<line_sep>y=th.softmax(x.view(batch_size l l) -2).view(-1)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<line_sep>tic=time.time()<line_sep>y_ori=y.clone()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<line_sep>x_grad_ori=x.grad.clone()<line_sep>x.grad.zero_()<line_sep>print('custom softmax(gather)')<line_sep>tic=time.time()<line_sep>y=SparseSoftmax.apply(COL INDPTR_C eid_c x)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y_ori y)<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(x_grad_ori x.grad rtol=1e-3 atol=1e-6)<line_sep>x.grad.zero_()<line_sep>print('------------------------------------')<line_sep>print("spmm(pytorch coalesce)")<line_sep>A.grad.zero_()<line_sep>grad=th.rand(n dim 
device='cuda:0')<line_sep>tic=time.time()<line_sep>y=th.sparse.mm(adj_1 A)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<line_sep>y_ori=y.clone()<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<line_sep>A_grad_ori=A.grad.clone()<line_sep>adj_grad_ori=adj_1.grad._values()<line_sep>A.grad.zero_()<line_sep>adj_1.grad.zero_()<line_sep>print("vector-spmm(custom)")<line_sep>tic=time.time()<line_sep>val=adj_1._values()<line_sep>val.requires_grad_(<true>)<line_sep>y=VectorSPMM.apply(ROW INDPTR_R eid_r indices_r COL INDPTR_C eid_c indices_c val A)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y_ori y)<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(A_grad_ori A.grad)<and>th.allclose(val.grad adj_grad_ori)<line_sep>A.grad.zero_()<line_sep>val.grad.zero_()<line_sep>""" Multi Head Test """<line_sep>print('\nMulti Head (batch size: 512, length: 30, head: 8, dim:64)\n===========================================')<line_sep>print('NodeMulEdge\nsimple implementation(copy to edge)')<line_sep>dim=64<line_sep>h=8<line_sep>A=th.rand(n dim<times>h requires_grad=<true> device='cuda:0')<line_sep>B=th.rand(e dim requires_grad=<true> device='cuda:0')<line_sep>grad=th.rand(e h device='cuda:0')<line_sep>tic=time.time()<line_sep>A_e=th.sparse.mm(inc_x.float() A)<line_sep>y=(A_e.view(-1 h dim)<times>B.view(-1 1 dim)).sum(-1)<line_sep>y_ori=y.clone()<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<line_sep>A_grad_ori,B_grad_ori=A.grad.clone() B.grad.clone()<line_sep>A.grad.zero_()<line_sep>B.grad.zero_()<line_sep>print('custom kernel')<line_sep>tic=time.time()<line_sep>y=NodeMulEdge.apply(ROW INDPTR_R eid_r A.view(-1 h dim) B.view(-1 dim))<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y_ori y)<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(A_grad_ori A.grad)<and>th.allclose(B_grad_ori B.grad)<line_sep>A.grad.zero_()<line_sep>B.grad.zero_()<line_sep>print('MaskedNN(src_dot_dst)\nsimple implementation(copy to edge)')<line_sep>dim=64<line_sep>h=8<line_sep>A=th.rand(n dim<times>h requires_grad=<true> device='cuda:0')<line_sep>B=th.rand(n dim<times>h requires_grad=<true> device='cuda:0')<line_sep>grad=th.rand(e h device='cuda:0')<line_sep>tic=time.time()<line_sep>A_e=th.sparse.mm(inc_x.float() A)<line_sep>B_e=th.sparse.mm(inc_y.float() B)<line_sep>y=(A_e.view(-1 h dim)<times>B_e.view(-1 h dim)).sum(-1)<line_sep>y_ori=y.clone()<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<line_sep>A_grad_ori,B_grad_ori=A.grad.clone() B.grad.clone()<line_sep>A.grad.zero_()<line_sep>B.grad.zero_()<line_sep>print('vanilla bmm')<line_sep>tic=time.time()<line_sep>y=(A.view(batch_size l h 
dim).contiguous().transpose(1 2)@B.view(batch_size l h dim).contiguous().permute(0 2 3 1)).permute(0 2 3 1).contiguous().view(-1 h)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y y_ori)<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(A.grad A_grad_ori)<and>th.allclose(B.grad B_grad_ori)<line_sep>A.grad.zero_()<line_sep>B.grad.zero_()<line_sep>print('custom kernel(csr)')<line_sep>tic=time.time()<line_sep>y=MaskedMMCSR.apply(ROW INDPTR_R eid_r indices_r COL INDPTR_C eid_c indices_c A.view(-1 h dim) B.view(-1 h dim))<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y y_ori)<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(A.grad A_grad_ori)<and>th.allclose(B.grad B_grad_ori)<line_sep># ------------------------------------------------------------------------ # Test sparse softmax # ------------------------------------------------------------------------ print('------------------------------------')<line_sep>print('vanilla softmax(scatter)')<line_sep>tic=time.time()<line_sep>x=th.rand(e h requires_grad=<true> device='cuda:0')<line_sep>y=th.softmax(x.view(batch_size l l h) -2).view(-1 h)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<line_sep>tic=time.time()<line_sep>y_ori=y.clone()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<line_sep>x_grad_ori=x.grad.clone()<line_sep>x.grad.zero_()<line_sep>print('custom softmax(scatter)')<line_sep>tic=time.time()<line_sep>y=SparseSoftmax.apply(ROW INDPTR_R eid_r x)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y_ori y)<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(x_grad_ori x.grad rtol=1e-3 atol=1e-6)<line_sep>x.grad.zero_()<line_sep>print('vanilla softmax(gather)')<line_sep>tic=time.time()<line_sep>x=th.rand(e h requires_grad=<true> device='cuda:0')<line_sep>y=th.softmax(x.view(batch_size l l h) -3).view(-1 h)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<line_sep>tic=time.time()<line_sep>y_ori=y.clone()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<line_sep>x_grad_ori=x.grad.clone()<line_sep>x.grad.zero_()<line_sep>print('custom softmax(gather)')<line_sep>tic=time.time()<line_sep>y=SparseSoftmax.apply(COL INDPTR_C eid_c x)<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y_ori y)<line_sep>tic=time.time()<line_sep>y.backward(grad)<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(x_grad_ori x.grad rtol=1e-3 atol=1e-6)<line_sep>x.grad.zero_()<line_sep>adjs=[]<for_stmt>index range(8)<block_start>adj_index=th.sparse.FloatTensor(i th.rand(e) th.Size([n 
n])).cuda(0).coalesce()<line_sep>adj_index.requires_grad_(<true>)<line_sep>adjs.append(adj_index)<block_end>print('------------------------------------')<line_sep>print("spmm(pytorch coalesce)")<line_sep>A.grad.zero_()<line_sep>grad=[th.rand(n dim device='cuda:0')<for>_ range(8)]<line_sep>tic=time.time()<line_sep>ys=[]<for_stmt>index range(8)<block_start>ys.append(th.sparse.mm(adjs[index] A.view(n h dim)[: index :]))<block_end>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<line_sep>y_ori=th.cat([y.clone().view(n 1 dim)<for>y ys] dim=-2)<line_sep>tic=time.time()<for_stmt>index range(8)<block_start>ys[index].backward(grad[index])<block_end>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<line_sep>A_grad_ori=A.grad.clone()<line_sep>adj_grad_ori=th.cat([_.grad._values().view(e 1)<for>_ adjs] dim=-1)<line_sep>A.grad.zero_()<for_stmt>index range(8)<block_start>adjs[index].grad.zero_()<block_end>print("vector-spmm(custom)")<line_sep>val=th.cat([_._values().view(-1 1)<for>_ adjs] dim=-1)<line_sep>val.requires_grad_(<true>)<line_sep>tic=time.time()<line_sep>y=VectorSPMM.apply(ROW INDPTR_R eid_r indices_r COL INDPTR_C eid_c indices_c val A.view(n h dim))<line_sep>th.cuda.synchronize()<line_sep>print('forward elapse time: {}'.format(time.time()-tic))<assert_stmt>th.allclose(y_ori y)<line_sep>tic=time.time()<line_sep>y.backward(th.cat([_.view(n 1 dim)<for>_ grad] dim=-2))<line_sep>th.cuda.synchronize()<line_sep>print('backward elapse time: {}'.format(time.time()-tic))<block_end>
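# The nested index-building loops in the benchmark above construct a
# block-diagonal CSR layout (every token attends only within its own
# length-l sequence). A sketch, using plain torch, of how the same
# indptr/indices can be built without Python loops; names mirror the script.
import torch as th

def block_diag_csr(batch_size, l):
    """Return (indptr, indices) matching the loop-built indptr_r/indices_r."""
    n = batch_size * l
    # each of the n rows has exactly l neighbours, so indptr is arithmetic
    indptr = th.arange(0, n * l + 1, l, dtype=th.long)
    block = th.arange(l, dtype=th.long)
    offsets = th.arange(batch_size, dtype=th.long) * l
    # all l rows of block b share the column set [b*l, (b+1)*l)
    cols = (offsets[:, None] + block[None, :]).reshape(batch_size, 1, l)
    indices = cols.expand(batch_size, l, l).reshape(-1)
    return indptr, indices

# block_diag_csr(2, 3) gives indptr [0, 3, 6, 9, 12, 15, 18] and indices
# [0, 1, 2, 0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5]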
"""Module for spaCy v3 compatible SpaczzRuler."""<import_from_future_stmt> annotations<import_from_stmt>collections defaultdict<import_from_stmt>itertools chain<import_from_stmt>logging exception<import_from_stmt>pathlib Path<import_from_stmt>typing Any Callable DefaultDict Dict Iterable List Optional Set Tuple Union <import_stmt>warnings<try_stmt><block_start><import_from_stmt>spacy.language Language<import_from_stmt>spacy.pipeline Pipe<import_from_stmt>spacy.scorer get_ner_prf<import_from_stmt>spacy.tokens Doc Span<import_from_stmt>spacy.training Example validate_examples<import_from_stmt>spacy.util SimpleFrozenDict SimpleFrozenList<block_end><except_stmt>ImportError# pragma: no cover <block_start><raise>ImportError(("Trying to import spaCy v3 compatible SpaczzRuler from spaCy v2." "Please upgrade or use the SpaczzRuler in _spaczzruler-legacy" ))<block_end><import_stmt>srsly<import_from_stmt>..exceptions PatternTypeWarning<import_from_stmt>..matcher FuzzyMatcher RegexMatcher TokenMatcher<import_from_stmt>..regex RegexConfig<import_from_stmt>..util ensure_path nest_defaultdict read_from_disk write_to_disk<line_sep>DEFAULT_ENT_ID_SEP="||"<line_sep>simple_frozen_dict=SimpleFrozenDict()<line_sep>simple_frozen_list=SimpleFrozenList()<line_sep>@Language.factory("spaczz_ruler" assigns=["doc.ents" "token.ent_type" "token.ent_iob"] default_config={"overwrite_ents":<false> "ent_id_sep":DEFAULT_ENT_ID_SEP "fuzzy_defaults":simple_frozen_dict "regex_defaults":simple_frozen_dict "token_defaults":simple_frozen_dict } default_score_weights={"ents_f":1.0 "ents_p":0.0 "ents_r":0.0 "ents_per_type":<none> } )<def_stmt>make_spaczz_ruler # typing nlp with Language causes issue with Pydantic in spaCy integration nlp:Any name:str overwrite_ents:bool ent_id_sep:str fuzzy_defaults:Dict[str Any] regex_defaults:Dict[str Any] token_defaults:Dict[str Any] <arrow>SpaczzRuler<block_start>"""Factory method for creating a `SpaczzRuler`."""<line_sep><return>SpaczzRuler(nlp name overwrite_ents=overwrite_ents ent_id_sep=ent_id_sep fuzzy_defaults=fuzzy_defaults regex_defaults=regex_defaults token_defaults=token_defaults )<block_end><class_stmt>SpaczzRuler(Pipe)<block_start>"""The `SpaczzRuler` adds fuzzy and multi-token regex matches to spaCy `Doc.ents`. It can be combined with other spaCy NER components like the statistical `EntityRecognizer` and/or the `EntityRuler` to boost accuracy. After initialization, the component is typically added to the pipeline using `nlp.add_pipe`. Attributes: nlp: The shared nlp object to pass the vocab to the matchers (not currently used by spaczz matchers) and process fuzzy patterns. fuzzy_patterns: Patterns added to the fuzzy matcher. regex_patterns: Patterns added to the regex matcher. token_patterns: Patterns added to the token matcher fuzzy_matcher: The `FuzzyMatcher` instance the spaczz ruler will use for fuzzy phrase matching. regex_matcher: The `RegexMatcher` instance the spaczz ruler will use for regex phrase matching. token_matcher: The `TokenMatcher` instance the spaczz ruler will use for token matching. defaults: Default matching settings for their respective matchers. 
"""<line_sep>name="spaczz_ruler"<def_stmt>__init__ self:SpaczzRuler nlp:Language name:str="spaczz_ruler" * overwrite_ents:bool=<false> ent_id_sep:str=DEFAULT_ENT_ID_SEP fuzzy_defaults:Dict[str Any]=simple_frozen_dict regex_defaults:Dict[str Any]=simple_frozen_dict token_defaults:Dict[str Any]=simple_frozen_dict regex_config:Union[str RegexConfig]="default" patterns:Optional[Iterable[Dict[str Any]]]=<none> **kwargs:Any <arrow><none><block_start>"""Initialize the spaczz ruler. If `patterns` is supplied here, it needs to be an iterable of spaczz patterns: dictionaries with `"label"`, `"pattern"`, and `"type"` keys. If the patterns are fuzzy or regex phrase patterns they can include the optional `"kwargs"` keys. For example, a fuzzy phrase pattern: `{'label': 'ORG', 'pattern': 'Apple', 'type': 'fuzzy', 'kwargs': {'min_r2': 90}}` Or, a token pattern: `{'label': 'ORG', 'pattern': [{'TEXT': {'FUZZY': 'Apple'}}], 'type': 'token'}` Prior to spaczz v0.5, optional parameters had to be prepended with "spaczz_" to prevent potential conflicts with other spaCy components. As of spaCy v3 this is no longer an issue so prepending optional parameters with "spaczz_" is no longer necessary. Args: nlp: The shared `Language` object to pass the vocab to the matchers and process fuzzy patterns. name: Instance name of the current pipeline component. Typically passed in automatically from the factory when the component is added. Used to disable the current entity ruler while creating phrase patterns with the nlp object. overwrite_ents: If existing entities are present, e.g. entities added by the model, overwrite them by matches if necessary. Default is `False`. ent_id_sep: Separator used internally for entity IDs. fuzzy_defaults: Modified default parameters to use with the `FuzzyMatcher`. Default is `None`. regex_defaults: Modified default parameters to use with the `RegexMatcher`. Default is `None`. token_defaults: Modified default parameters to use with the `TokenMatcher`. Default is `None`. regex_config: Should largely be ignored as an artifact of an old spaczz design pattern. Will likely be updated in the future. Default is `"default"`. patterns: Optional patterns to load in. Default is `None`. kwargs: For backwards compatibility with "spaczz_" prepended parameters. Raises: TypeError: If matcher defaults passed are not dictionaries. """<line_sep>self.nlp=nlp<line_sep>self.name=name<line_sep>self.overwrite=kwargs.get("spaczz_overwrite_ents" overwrite_ents)<line_sep>self.fuzzy_patterns:DefaultDict[str DefaultDict[str Any]]=nest_defaultdict(list 2)<line_sep>self.regex_patterns:DefaultDict[str DefaultDict[str Any]]=nest_defaultdict(list 2)<line_sep>self.token_patterns:DefaultDict[str List[List[Dict[str Any]]]]=defaultdict(list)<line_sep>self.ent_id_sep=kwargs.get("spaczz_ent_id_sep" ent_id_sep)<line_sep>self._ent_ids:DefaultDict[Any Any]=defaultdict(dict)<line_sep>self.defaults={}<line_sep>default_names=("fuzzy_defaults" "regex_defaults" "token_defaults" )<line_sep>fuzzy_defaults=kwargs.get("spaczz_fuzzy_defaults" fuzzy_defaults)<line_sep>regex_defaults=kwargs.get("spaczz_regex_defaults" regex_defaults)<line_sep>token_defaults=kwargs.get("spaczz_token_defaults" token_defaults)<for_stmt>default,name zip((fuzzy_defaults regex_defaults token_defaults) default_names)<block_start><if_stmt>isinstance(default dict)<block_start>self.defaults[name]=default<block_end><else_stmt><block_start><raise>TypeError(("Defaults must be a dictionary of keyword arguments," f"not {type(default)}." 
))<block_end><block_end>self.fuzzy_matcher=FuzzyMatcher(nlp.vocab **self.defaults["fuzzy_defaults"])<line_sep>self.regex_matcher=RegexMatcher(nlp.vocab regex_config **self.defaults["regex_defaults"])<line_sep>self.token_matcher=TokenMatcher(nlp.vocab **self.defaults["token_defaults"])<line_sep>patterns=kwargs.get("spaczz_patterns" patterns)<if_stmt>patterns<is><not><none><block_start>self.add_patterns(patterns)<block_end><block_end><def_stmt>__call__ self:SpaczzRuler doc:Doc<arrow>Doc<block_start>"""Find matches in document and add them as entities. Args: doc: The Doc object in the pipeline. Returns: The Doc with added entities, if available. Example: >>> import spacy >>> from spaczz.pipeline import SpaczzRuler >>> nlp = spacy.blank("en") >>> ruler = SpaczzRuler(nlp) >>> doc = nlp.make_doc("My name is <NAME>") >>> ruler.add_patterns([{"label": "NAME", "pattern": "<NAME>", "type": "fuzzy", "kwargs": {"fuzzy_func": "token_sort"}}]) >>> doc = ruler(doc) >>> "<NAME>" in [ent.text for ent in doc.ents] True """<line_sep>error_handler=self.get_error_handler()<try_stmt><block_start>matches,lookup=self.match(doc)<line_sep>self.set_annotations(doc matches lookup)<line_sep><return>doc<block_end><except_stmt>Exception<as>e<block_start>error_handler(self.name self [doc] e)<block_end><block_end><def_stmt>__contains__ self:SpaczzRuler label:str<arrow>bool<block_start>"""Whether a label is present in the patterns."""<line_sep><return>(label<in>self.fuzzy_patterns<or>label<in>self.regex_patterns<or>label<in>self.token_patterns)<block_end><def_stmt>__len__ self:SpaczzRuler<arrow>int<block_start>"""The number of all patterns added to the ruler."""<line_sep>n_fuzzy_patterns=sum(len(p["patterns"])<for>p self.fuzzy_patterns.values())<line_sep>n_regex_patterns=sum(len(p["patterns"])<for>p self.regex_patterns.values())<line_sep>n_token_patterns=sum(len(p)<for>p self.token_patterns.values())<line_sep><return>n_fuzzy_patterns+n_regex_patterns+n_token_patterns<block_end>@property<def_stmt>ent_ids self:SpaczzRuler<arrow>Tuple[Optional[str] <ellipsis>]<block_start>"""All entity ids present in the match patterns id properties. Returns: The unique string entity ids as a tuple. Example: >>> import spacy >>> from spaczz.pipeline import SpaczzRuler >>> nlp = spacy.blank("en") >>> ruler = SpaczzRuler(nlp) >>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac", "type": "fuzzy", "id": "BEAT"}]) >>> ruler.ent_ids ('BEAT',) """<line_sep>keys=set(self.fuzzy_patterns.keys())<line_sep>keys.update(self.regex_patterns.keys())<line_sep>keys.update(self.token_patterns.keys())<line_sep>all_ent_ids=set()<for_stmt>k keys<block_start><if_stmt>self.ent_id_sep<in>k<block_start>_,ent_id=self._split_label(k)<line_sep>all_ent_ids.add(ent_id)<block_end><block_end>all_ent_ids_tuple=tuple(all_ent_ids)<line_sep><return>all_ent_ids_tuple<block_end>@property<def_stmt>labels self:SpaczzRuler<arrow>Tuple[str <ellipsis>]<block_start>"""All labels present in the ruler. Returns: The unique string labels as a tuple. 
Example: >>> import spacy >>> from spaczz.pipeline import SpaczzRuler >>> nlp = spacy.blank("en") >>> ruler = SpaczzRuler(nlp) >>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac", "type": "fuzzy"}]) >>> ruler.labels ('AUTHOR',) """<line_sep>keys=set(self.fuzzy_patterns.keys())<line_sep>keys.update(self.regex_patterns.keys())<line_sep>keys.update(self.token_patterns.keys())<line_sep>all_labels=set()<for_stmt>k keys<block_start><if_stmt>self.ent_id_sep<in>k<block_start>label,_=self._split_label(k)<line_sep>all_labels.add(label)<block_end><else_stmt><block_start>all_labels.add(k)<block_end><block_end><return>tuple(all_labels)<block_end>@property<def_stmt>patterns self:SpaczzRuler<arrow>List[Dict[str Any]]<block_start>"""Get all patterns and kwargs that were added to the ruler. Returns: The original patterns and kwargs, one dictionary for each combination. Example: >>> import spacy >>> from spaczz.pipeline import SpaczzRuler >>> nlp = spacy.blank("en") >>> ruler = SpaczzRuler(nlp) >>> ruler.add_patterns([{"label": "STREET", "pattern": "street_addresses", "type": "regex", "kwargs": {"predef": True}}]) >>> ruler.patterns == [ { "label": "STREET", "pattern": "street_addresses", "type": "regex", "kwargs": {"predef": True}, }, ] True """<line_sep>all_patterns=[]<for_stmt>label,fuzzy_patterns self.fuzzy_patterns.items()<block_start><for_stmt>fuzzy_pattern,fuzzy_kwargs zip(fuzzy_patterns["patterns"] fuzzy_patterns["kwargs"])<block_start>ent_label,ent_id=self._split_label(label)<line_sep>p={"label":ent_label "pattern":fuzzy_pattern.text "type":"fuzzy"}<if_stmt>fuzzy_kwargs<block_start>p["kwargs"]=fuzzy_kwargs<block_end><if_stmt>ent_id<block_start>p["id"]=ent_id<block_end>all_patterns.append(p)<block_end><block_end><for_stmt>label,regex_patterns self.regex_patterns.items()<block_start><for_stmt>regex_pattern,regex_kwargs zip(regex_patterns["patterns"] regex_patterns["kwargs"])<block_start>ent_label,ent_id=self._split_label(label)<line_sep>p={"label":ent_label "pattern":regex_pattern "type":"regex"}<if_stmt>regex_kwargs<block_start>p["kwargs"]=regex_kwargs<block_end><if_stmt>ent_id<block_start>p["id"]=ent_id<block_end>all_patterns.append(p)<block_end><block_end><for_stmt>label,token_patterns self.token_patterns.items()<block_start><for_stmt>token_pattern token_patterns<block_start>ent_label,ent_id=self._split_label(label)<line_sep>p={"label":ent_label "pattern":token_pattern "type":"token"}<if_stmt>ent_id<block_start>p["id"]=ent_id<block_end>all_patterns.append(p)<block_end><block_end><return>all_patterns<block_end><def_stmt>add_patterns self:SpaczzRuler patterns:Iterable[Dict[str Any]] <arrow><none><block_start>"""Add patterns to the ruler. A pattern must be a spaczz pattern: `{label (str), pattern (str or list), type (str), optional kwargs (dict[str, Any]), and optional id (str)}`. For example, a fuzzy phrase pattern: `{'label': 'ORG', 'pattern': 'Apple', 'type': 'fuzzy', 'kwargs': {'min_r2': 90}}` Or, a token pattern: `{'label': 'ORG', 'pattern': [{'TEXT': {'FUZZY': 'Apple'}}], 'type': 'token'}` To utilize regex flags, use inline flags. Kwarg details to be updated. Args: patterns: The spaczz patterns to add. Raises: TypeError: If patterns is not an iterable of dictionaries. ValueError: If one or more patterns do not conform the spaczz pattern structure. 
Example: >>> import spacy >>> from spaczz.pipeline import SpaczzRuler >>> nlp = spacy.blank("en") >>> ruler = SpaczzRuler(nlp) >>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac", "type": "fuzzy"}]) >>> "AUTHOR" in ruler.labels True """<line_sep># disable the nlp components after this one in case # they hadn't been initialized / deserialised yet <try_stmt><block_start>current_index=-1<for_stmt>i,(_name pipe) enumerate(self.nlp.pipeline)<block_start><if_stmt>self<eq>pipe<block_start>current_index=i<line_sep><break><block_end><block_end>subsequent_pipes=[pipe<for>pipe self.nlp.pipe_names[current_index+1:]]<block_end><except_stmt>ValueError<block_start>subsequent_pipes=[]<block_end><with_stmt>self.nlp.select_pipes(disable=subsequent_pipes)<block_start>token_patterns=[]<line_sep>fuzzy_pattern_labels=[]<line_sep>fuzzy_pattern_texts=[]<line_sep>fuzzy_pattern_kwargs=[]<line_sep>fuzzy_pattern_ids=[]<line_sep>regex_pattern_labels=[]<line_sep>regex_pattern_texts=[]<line_sep>regex_pattern_kwargs=[]<line_sep>regex_pattern_ids=[]<for_stmt>entry patterns<block_start><try_stmt><block_start><if_stmt>isinstance(entry dict)<block_start><if_stmt>entry["type"]<eq>"fuzzy"<block_start>fuzzy_pattern_labels.append(entry["label"])<line_sep>fuzzy_pattern_texts.append(entry["pattern"])<line_sep>fuzzy_pattern_kwargs.append(entry.get("kwargs" {}))<line_sep>fuzzy_pattern_ids.append(entry.get("id"))<block_end><elif_stmt>entry["type"]<eq>"regex"<block_start>regex_pattern_labels.append(entry["label"])<line_sep>regex_pattern_texts.append(entry["pattern"])<line_sep>regex_pattern_kwargs.append(entry.get("kwargs" {}))<line_sep>regex_pattern_ids.append(entry.get("id"))<block_end><elif_stmt>entry["type"]<eq>"token"<block_start>token_patterns.append(entry)<block_end><else_stmt><block_start>warnings.warn(f"""Spaczz pattern "type" must be "fuzzy", "regex", or "token", not {entry["type"]}. Skipping this pattern. """ PatternTypeWarning )<block_end><block_end><else_stmt><block_start><raise>TypeError(("Patterns must be an iterable of dicts."))<block_end><block_end><except_stmt>KeyError<block_start><raise>ValueError(("One or more patterns do not conform " "to spaczz pattern structure: " "{label (str), pattern (str or list), type (str), " "optional kwargs (dict[str, Any]), " "and optional id (str)}." 
))<block_end><block_end>fuzzy_patterns=[]<for_stmt>label,pattern,kwargs,ent_id zip(fuzzy_pattern_labels self.nlp.pipe(fuzzy_pattern_texts) fuzzy_pattern_kwargs fuzzy_pattern_ids )<block_start>fuzzy_pattern={"label":label "pattern":pattern "kwargs":kwargs "type":"fuzzy" }<if_stmt>ent_id<block_start>fuzzy_pattern["id"]=ent_id<block_end>fuzzy_patterns.append(fuzzy_pattern)<block_end>regex_patterns=[]<for_stmt>label,pattern,kwargs,ent_id zip(regex_pattern_labels regex_pattern_texts regex_pattern_kwargs regex_pattern_ids )<block_start>regex_pattern={"label":label "pattern":pattern "kwargs":kwargs "type":"regex" }<if_stmt>ent_id<block_start>regex_pattern["id"]=ent_id<block_end>regex_patterns.append(regex_pattern)<block_end>self._add_patterns(fuzzy_patterns regex_patterns token_patterns)<block_end><block_end><def_stmt>clear self:SpaczzRuler<arrow><none><block_start>"""Reset all patterns."""<line_sep>self.fuzzy_patterns=nest_defaultdict(list 2)<line_sep>self.regex_patterns=nest_defaultdict(list 2)<line_sep>self.token_patterns=defaultdict(list)<line_sep>self._ent_ids=defaultdict(dict)<block_end><def_stmt>initialize self:SpaczzRuler get_examples:Callable[[] Iterable[Example]] * nlp:Optional[Language]=<none> patterns:Optional[Iterable[Dict[str Any]]]=<none> <arrow><none><block_start>"""Initialize the pipe for training. Args: get_examples: Function that returns a representative sample of gold-standard Example objects. nlp: The current nlp object the component is part of. patterns: The list of patterns. """<line_sep>self.clear()<if_stmt>patterns<block_start>self.add_patterns(patterns)<block_end><block_end><def_stmt>match self:SpaczzRuler doc:Doc<arrow>Tuple[List[Tuple[str int int]] DefaultDict[str Dict[Tuple[str int int] Any]] ]<block_start>"""Used in call to find matches in a doc."""<line_sep>fuzzy_matches=[]<line_sep>lookup:DefaultDict[str Dict[Tuple[str int int] Any]]=defaultdict(dict)<for_stmt>fuzzy_match self.fuzzy_matcher(doc)<block_start>current_ratio=fuzzy_match[3]<line_sep>best_ratio=lookup["ratios"].get(fuzzy_match[:3] 0)<if_stmt>current_ratio<g>best_ratio<block_start>fuzzy_matches.append(fuzzy_match[:3])<line_sep>lookup["ratios"][fuzzy_match[:3]]=current_ratio<block_end><block_end>regex_matches=[]<for_stmt>regex_match self.regex_matcher(doc)<block_start>current_counts=regex_match[3]<line_sep>best_counts=lookup["counts"].get(regex_match[:3])<if_stmt><not>best_counts<or>sum(current_counts)<l>sum(best_counts)<block_start>regex_matches.append(regex_match[:3])<line_sep>lookup["counts"][regex_match[:3]]=current_counts<block_end><block_end>token_matches=[]<for_stmt>token_match self.token_matcher(doc)<block_start>token_matches.append(token_match[:3])<line_sep>lookup["details"][token_match[:3]]=1<block_end>matches=fuzzy_matches+regex_matches+token_matches<line_sep>unique_matches,lookup=self._filter_overlapping_matches(matches lookup)<line_sep><return>unique_matches lookup<block_end><def_stmt>score self:SpaczzRuler examples:Any **kwargs:Any<arrow>Any<block_start>"""Pipeline scoring for spaCy compatibility."""<line_sep>validate_examples(examples "SpaczzRuler.score")<line_sep><return>get_ner_prf(examples)<block_end><def_stmt>set_annotations self:SpaczzRuler doc:Doc matches:List[Tuple[str int int]] lookup:DefaultDict[str Dict[Tuple[str int int] Union[int Tuple[int int int]]]] <arrow><none><block_start>"""Modify the document in place."""<line_sep>entities=list(doc.ents)<line_sep>new_entities=[]<line_sep>seen_tokens:Set[int]=set()<for_stmt>match_id,start,end 
matches<block_start><if_stmt>any(t.ent_type<for>t doc[start:end])<and><not>self.overwrite<block_start><continue><block_end># check for end - 1 here because boundaries are inclusive <if_stmt>start<not><in>seen_tokens<and>end-1<not><in>seen_tokens<block_start><if_stmt>match_id<in>self._ent_ids<block_start>label,ent_id=self._ent_ids[match_id]<line_sep>span=Span(doc start end label=label)<if_stmt>ent_id<block_start><for_stmt>token span<block_start>token.ent_id_=ent_id<block_end><block_end><block_end><else_stmt><block_start>span=Span(doc start end label=match_id)<block_end>span=self._update_custom_attrs(span match_id lookup)<line_sep>new_entities.append(span)<line_sep>entities=[e<for>e entities<if><not>(e.start<l>end<and>e.end<g>start)]<line_sep>seen_tokens.update(range(start end))<block_end><block_end>doc.ents=entities+new_entities<block_end><def_stmt>from_bytes self:SpaczzRuler patterns_bytes:bytes * exclude:Iterable[str]=simple_frozen_list <arrow>SpaczzRuler<block_start>"""Load the spaczz ruler from a bytestring. Args: patterns_bytes : The bytestring to load. exclude: For spaCy consistency. Returns: The loaded spaczz ruler. Example: >>> import spacy >>> from spaczz.pipeline import SpaczzRuler >>> nlp = spacy.blank("en") >>> ruler = SpaczzRuler(nlp) >>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac", "type": "fuzzy"}]) >>> ruler_bytes = ruler.to_bytes() >>> new_ruler = SpaczzRuler(nlp) >>> new_ruler = new_ruler.from_bytes(ruler_bytes) >>> "AUTHOR" in new_ruler True """<line_sep>cfg=srsly.msgpack_loads(patterns_bytes)<line_sep>self.clear()<if_stmt>isinstance(cfg dict)<block_start>self.add_patterns(cfg.get("patterns" cfg))<line_sep>self.defaults=cfg.get("defaults" {})<if_stmt>self.defaults.get("fuzzy_defaults")<block_start>self.fuzzy_matcher=FuzzyMatcher(self.nlp.vocab **self.defaults["fuzzy_defaults"])<block_end><if_stmt>self.defaults.get("regex_defaults")<block_start>self.regex_matcher=RegexMatcher(self.nlp.vocab **self.defaults["regex_defaults"])<block_end><if_stmt>self.defaults.get("token_defaults")<block_start>self.token_matcher=TokenMatcher(self.nlp.vocab **self.defaults["token_defaults"])<block_end>self.overwrite=cfg.get("overwrite" <false>)<line_sep>self.ent_id_sep=cfg.get("ent_id_sep" DEFAULT_ENT_ID_SEP)<block_end><else_stmt><block_start>self.add_patterns(cfg)<block_end><return>self<block_end><def_stmt>to_bytes self:SpaczzRuler * exclude:Iterable[str]=simple_frozen_list<arrow>bytes<block_start>"""Serialize the spaczz ruler patterns to a bytestring. Args: exclude: For spaCy consistency. Returns: The serialized patterns. Example: >>> import spacy >>> from spaczz.pipeline import SpaczzRuler >>> nlp = spacy.blank("en") >>> ruler = SpaczzRuler(nlp) >>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac", "type": "fuzzy"}]) >>> ruler_bytes = ruler.to_bytes() >>> isinstance(ruler_bytes, bytes) True """<line_sep>serial={"overwrite":self.overwrite "ent_id_sep":self.ent_id_sep "patterns":self.patterns "defaults":self.defaults }<line_sep><return>srsly.msgpack_dumps(serial)<block_end><def_stmt>from_disk self:SpaczzRuler path:Union[str Path] * exclude:Iterable[str]=simple_frozen_list <arrow>SpaczzRuler<block_start>"""Load the spaczz ruler from a file. Expects a file containing newline-delimited JSON (JSONL) with one entry per line. Args: path: The JSONL file to load. exclude: For spaCy consistency. Returns: The loaded spaczz ruler. 
Example: >>> import os >>> import tempfile >>> import spacy >>> from spaczz.pipeline import SpaczzRuler >>> nlp = spacy.blank("en") >>> ruler = SpaczzRuler(nlp) >>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac", "type": "fuzzy"}]) >>> with tempfile.TemporaryDirectory() as tmpdir: >>> ruler.to_disk(f"{tmpdir}/ruler") >>> new_ruler = SpaczzRuler(nlp) >>> new_ruler = new_ruler.from_disk(f"{tmpdir}/ruler") >>> "AUTHOR" in new_ruler True """<line_sep>path=ensure_path(path)<line_sep>self.clear()<line_sep>depr_patterns_path=path.with_suffix(".jsonl")<if_stmt>depr_patterns_path.is_file()<block_start>patterns=srsly.read_jsonl(depr_patterns_path)<line_sep>self.add_patterns(patterns)<block_end><else_stmt><block_start>cfg={}<line_sep>deserializers_patterns={"patterns":<lambda>p:self.add_patterns(srsly.read_jsonl(p.with_suffix(".jsonl")))}<line_sep>deserializers_cfg={"cfg":<lambda>p:cfg.update(srsly.read_json(p))}<line_sep>read_from_disk(path deserializers_cfg {})<line_sep>self.overwrite=cfg.get("overwrite" <false>)<line_sep>self.defaults=cfg.get("defaults" {})<if_stmt>self.defaults.get("fuzzy_defaults")<block_start>self.fuzzy_matcher=FuzzyMatcher(self.nlp.vocab **self.defaults["fuzzy_defaults"])<block_end><if_stmt>self.defaults.get("regex_defaults")<block_start>self.regex_matcher=RegexMatcher(self.nlp.vocab **self.defaults["regex_defaults"])<block_end><if_stmt>self.defaults.get("token_defaults")<block_start>self.token_matcher=TokenMatcher(self.nlp.vocab **self.defaults["token_defaults"])<block_end>self.ent_id_sep=cfg.get("ent_id_sep" DEFAULT_ENT_ID_SEP)<line_sep>read_from_disk(path deserializers_patterns {})<block_end><return>self<block_end><def_stmt>to_disk self:SpaczzRuler path:Union[str Path] * exclude:Iterable[str]=simple_frozen_list <arrow><none><block_start>"""Save the spaczz ruler patterns to a directory. The patterns will be saved as newline-delimited JSON (JSONL). Args: path: The JSONL file to save. exclude: For spaCy consistency. 
Example: >>> import os >>> import tempfile >>> import spacy >>> from spaczz.pipeline import SpaczzRuler >>> nlp = spacy.blank("en") >>> ruler = SpaczzRuler(nlp) >>> ruler.add_patterns([{"label": "AUTHOR", "pattern": "Kerouac", "type": "fuzzy"}]) >>> with tempfile.TemporaryDirectory() as tmpdir: >>> ruler.to_disk(f"{tmpdir}/ruler") >>> isdir = os.path.isdir(f"{tmpdir}/ruler") >>> isdir True """<line_sep>path=ensure_path(path)<line_sep>cfg={"overwrite":self.overwrite "defaults":self.defaults "ent_id_sep":self.ent_id_sep }<line_sep>serializers={"patterns":<lambda>p:srsly.write_jsonl(p.with_suffix(".jsonl") self.patterns) "cfg":<lambda>p:srsly.write_json(p cfg) }<if_stmt>path.suffix<eq>".jsonl"# user wants to save only JSONL <block_start>srsly.write_jsonl(path self.patterns)<block_end><else_stmt><block_start>write_to_disk(path serializers {})<block_end><block_end><def_stmt>_add_patterns self:SpaczzRuler fuzzy_patterns:List[Dict[str Any]] regex_patterns:List[Dict[str Any]] token_patterns:List[Dict[str Any]] <arrow><none><block_start>"""Helper function for add_patterns."""<for_stmt>entry fuzzy_patterns+regex_patterns+token_patterns<block_start>label=entry["label"]<if_stmt>"id"<in>entry<block_start>ent_label=label<line_sep>label=self._create_label(label entry["id"])<line_sep>self._ent_ids[label]=(ent_label entry["id"])<block_end>pattern=entry["pattern"]<if_stmt>isinstance(pattern Doc)<block_start>self.fuzzy_patterns[label]["patterns"].append(pattern)<line_sep>self.fuzzy_patterns[label]["kwargs"].append(entry["kwargs"])<block_end><elif_stmt>isinstance(pattern str)<block_start>self.regex_patterns[label]["patterns"].append(pattern)<line_sep>self.regex_patterns[label]["kwargs"].append(entry["kwargs"])<block_end><elif_stmt>isinstance(pattern list)<block_start>self.token_patterns[label].append(pattern)<block_end><else_stmt><block_start><raise>ValueError(("One or more patterns do not conform " "to spaczz pattern structure: " "{label (str), pattern (str or list), type (str), " "optional kwargs (dict[str, Any]), " "and optional id (str)}." ))<block_end><block_end><for_stmt>label,patterns self.fuzzy_patterns.items()<block_start>self.fuzzy_matcher.add(label patterns["patterns"] patterns["kwargs"])<block_end><for_stmt>label,patterns self.regex_patterns.items()<block_start>self.regex_matcher.add(label patterns["patterns"] patterns["kwargs"])<block_end><for_stmt>label,_token_patterns self.token_patterns.items()<block_start>self.token_matcher.add(label _token_patterns)<block_end><block_end><def_stmt>_create_label self:SpaczzRuler label:str ent_id:Union[str <none>]<arrow>str<block_start>"""Join Entity label with ent_id if the pattern has an id attribute. Args: label: The entity label. ent_id: The optional entity id. Returns: The label and ent_id joined with configured ent_id_sep. """<if_stmt>isinstance(ent_id str)<block_start>label="{}{}{}".format(label self.ent_id_sep ent_id)<block_end><return>label<block_end><def_stmt>_split_label self:SpaczzRuler label:str<arrow>Tuple[str Union[str <none>]]<block_start>"""Split Entity label into ent_label and ent_id if it contains self.ent_id_sep. Args: label: The value of label in a pattern entry. Returns: The separated ent_label and optional ent_id. 
"""<if_stmt>self.ent_id_sep<in>label<block_start>ent_label,ent_id=label.rsplit(self.ent_id_sep 1)<line_sep><return>ent_label ent_id<block_end><else_stmt><block_start>ent_label=label<line_sep><return>ent_label <none><block_end><block_end>@staticmethod<def_stmt>_filter_overlapping_matches matches:List[Tuple[str int int]] lookup:DefaultDict[str Dict[Tuple[str int int] Any]] <arrow>Tuple[List[Tuple[str int int]] DefaultDict[str Dict[Tuple[str int int] Any]]]<block_start>"""Prevents multiple match spans from overlapping. Expects matches to be pre-sorted by matcher priority, with each matcher's matches being pre-sorted by descending length, then ascending start index, then descending match score If more than one match span includes the same tokens the first of these match spans in matches is kept. It also removes non-kept matches from the lookup dict as well. Args: matches: List of match span tuples (match_id, start_index, end_index). lookup: Match ratio, count and detail values in a `defaultdict(dict)`. Returns: The filtered list of match span tuples. """<line_sep>filtered_matches:List[Tuple[str int int]]=[]<for_stmt>match matches<block_start><if_stmt><not>set(range(match[1] match[2])).intersection(chain(*[set(range(n[1] n[2]))<for>n filtered_matches]))<block_start>filtered_matches.append(match)<if_stmt>match<in>lookup["ratios"]<block_start>_=lookup["counts"].pop(match <none>)<line_sep>_=lookup["details"].pop(match <none>)<block_end><elif_stmt>match<in>lookup["counts"]<block_start>_=lookup["details"].pop(match <none>)<block_end><block_end><block_end><return>filtered_matches lookup<block_end>@staticmethod<def_stmt>_update_custom_attrs span:Span match_id:str lookup:DefaultDict[str Dict[Tuple[str int int] Any]] <arrow>Span<block_start>"""Update custom attributes for matches."""<line_sep>ratio=lookup["ratios"].get((match_id span.start span.end))<line_sep>counts=lookup["counts"].get((match_id span.start span.end))<line_sep>details=lookup["details"].get((match_id span.start span.end))<for_stmt>token span<block_start>token._.spaczz_token=<true><if_stmt>ratio<block_start>token._.spaczz_ratio=ratio<line_sep>token._.spaczz_type="fuzzy"<block_end><elif_stmt>counts<block_start>token._.spaczz_counts=counts<line_sep>token._.spaczz_type="regex"<block_end><elif_stmt>details<block_start>token._.spaczz_details=details<line_sep>token._.spaczz_type="token"<block_end><block_end><return>span<block_end><block_end>
<import_stmt>testlib<import_stmt>test_combinators<def_stmt>forward env<block_start>lit1=env.lits[0]<line_sep>lit2=env.lits[1]<line_sep>test_combinators.run_close_test(env lit1 lit2 lit1)<block_end><def_stmt>reverse env<block_start>lit1=env.lits[0]<line_sep>lit2=env.lits[1]<line_sep>test_combinators.run_close_test(env lit2 lit1 lit1)# operands swapped relative to forward; assumed intent of the "reverse" case <block_end>
# flake8: noqa <import_from_stmt>.binarizer MagnitudeBinarizer ThresholdBinarizer TopKBinarizer<import_from_stmt>.masked_nn MaskedLinear<line_sep>
<import_from_stmt>fireant.dataset.fields Field<import_from_stmt>fireant.dataset.klass DataSet<import_from_stmt>fireant.queries.builder DataSetBlenderQueryBuilder DimensionChoicesQueryBuilder <import_from_stmt>fireant.utils deepcopy immutable ordered_distinct_list_by_attr <def_stmt>_wrap_dataset_fields dataset<block_start><if_stmt>isinstance(dataset DataSetBlender)<block_start><return>dataset.fields<block_end>wrapped_fields=[]<for_stmt>field dataset.fields<block_start>wrapped_field=_wrap_field(dataset field)<line_sep>wrapped_fields.append(wrapped_field)<block_end><return>wrapped_fields<block_end><def_stmt>_wrap_field dataset field<block_start>wrapped_field=Field(alias=field.alias definition=field data_type=field.data_type label=field.label hint_table=field.hint_table prefix=field.prefix suffix=field.suffix thousands=field.thousands precision=field.precision hyperlink_template=field.hyperlink_template )<if_stmt><not>field.definition.is_aggregate<block_start>wrapped_field.choices=DimensionChoicesBlenderQueryBuilder(dataset field)<block_end><return>wrapped_field<block_end><class_stmt>DataSetBlender<block_start>""" The DataSetBlender class is the DataSet equivalent for implementing data blending across distinct DataSet instances. """<def_stmt>__init__ self primary_dataset secondary_dataset dimension_map<block_start>""" Constructor for a blended dataset. Contains all the fields to initialize the dataset. :param primary_dataset: (Required) The primary dataset, whose table will be used as part of the FROM expression. This can be either a `DataSet` or another `DataSetBlender`, which means multiple DataSet instances can be blended by chaining together blenders. :param secondary_dataset: (Required) The dataset being blended. This should be a `DataSet` instance. (It might actually work with an instance of `DataSetBlender` as well, though.) :param dimension_map: A dict mapping fields from the primary to the secondary dataset. This tells the Blender which fields can be used as dimensions in the Blender queries. """<line_sep>self.primary_dataset=primary_dataset<line_sep>self.secondary_dataset=secondary_dataset<line_sep>self.dimension_map=dimension_map<line_sep># Wrap all dataset fields with another field on top so that: # 1. DataSetBlender doesn't share a reference to a field with a DataSet (__hash__ is used to find out which # dataset the field is in - see the Field class' __hash__ method for more details) # 2. When complex fields are added, the `definition` attribute will always have at least one field within # its object graph self.fields=DataSet.Fields(ordered_distinct_list_by_attr([*_wrap_dataset_fields(primary_dataset) *_wrap_dataset_fields(secondary_dataset)] ))<line_sep># add query builder entry points self.query=DataSetBlenderQueryBuilder(self)<line_sep>self.latest=self.primary_dataset.latest<line_sep>self.annotation=<none><block_end>@property<def_stmt>return_additional_metadata self<arrow>bool# When using data blending, datasets are nested inside DataSetBlender objects. Additionally, # the primary_dataset can be a combination of datasets depending on how many datasets are being blended. # This helper property walks the tree to return the return_additional_metadata value from the original # primary dataset. 
<block_start>dataset=self.primary_dataset<while_stmt><not>isinstance(dataset DataSet)<block_start>dataset=dataset.primary_dataset<block_end><return>dataset.return_additional_metadata<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other DataSetBlender)<and>self.fields<eq>other.fields<block_end><def_stmt>__repr__ self<block_start><return>"BlendedDataSet(fields=[{}])".format(",".join([repr(f)<for>f self.fields]))<block_end><def_stmt>__hash__ self<block_start><return>hash((self.primary_dataset self.secondary_dataset self.fields))<block_end><def_stmt>__deepcopy__ self memodict={}<block_start><for_stmt>field self.dimension_map.values()<block_start>memodict[id(field)]=field<block_end><return>deepcopy(self memodict)<block_end>@property<def_stmt>table self<block_start><return><none><block_end>@property<def_stmt>database self<block_start><return>self.primary_dataset.database<block_end>@immutable<def_stmt>extra_fields self *fields<block_start><for_stmt>field fields<block_start>self.fields.add(field)<block_end><block_end><def_stmt>blend self other<block_start>""" Returns a DataSet blender that enables executing queries across multiple datasets and combining the results. """<line_sep><return>DataSetBlenderBuilder(self other)<block_end><block_end><class_stmt>DataSetBlenderBuilder<block_start><def_stmt>__init__ self primary secondary<block_start>self.primary_dataset=primary<line_sep>self.secondary_dataset=secondary<block_end><def_stmt>on self dimension_map<block_start><return>DataSetBlender(self.primary_dataset self.secondary_dataset dimension_map)<block_end><def_stmt>on_dimensions self<block_start>""" This function doesn't work when blending more than 2 datasets. It won't select dimensions in the 3rd dataset and beyond. self.primary_dataset might be a DataSetBlender object itself. We would want to dig deeper until we find the actual primary dataset. """<line_sep>dimension_map={}<for_stmt>secondary_ds_field self.secondary_dataset.fields<block_start>is_aggregate_field=secondary_ds_field.is_aggregate<line_sep>matches_alias_in_primary_dataset=secondary_ds_field.alias<in>self.primary_dataset.fields<if_stmt>is_aggregate_field<or><not>matches_alias_in_primary_dataset<block_start><continue><block_end>primary_ds_field=self.primary_dataset.fields[secondary_ds_field.alias]<line_sep>dimension_map[primary_ds_field]=secondary_ds_field<block_end><return>self.on(dimension_map)<block_end><block_end><class_stmt>DimensionChoicesBlenderQueryBuilder(DimensionChoicesQueryBuilder)<block_start><def_stmt>filter self *filters **kwargs<block_start>filters=[fltr.for_(fltr.field.definition)<for>fltr filters<if>fltr.field.definition<in>self.dataset.fields]<line_sep><return>super().filter(*filters **kwargs)<block_end><block_end>
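# A usage sketch for the builder API above; `primary`, `secondary`, and the
# "date" field alias are hypothetical DataSet instances/fields.
# Automatic mapping pairs up every non-aggregate field whose alias exists in
# both datasets (on_dimensions); on() takes an explicit dimension map instead.
blended = primary.blend(secondary).on_dimensions()
blended = primary.blend(secondary).on(
    {primary.fields["date"]: secondary.fields["date"]}
)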
<import_from_stmt>logging getLogger<import_stmt>numpy<as>np<import_from_stmt>scipy.optimize minimize<import_from_stmt>scipy.optimize LinearConstraint<import_from_stmt>.controller Controller<import_from_stmt>..envs.cost calc_cost<line_sep>logger=getLogger(__name__)<class_stmt>LinearMPC(Controller)<block_start>""" Model Predictive Controller for linear models Attributes: A (numpy.ndarray): system matrix, shape(state_size, state_size) B (numpy.ndarray): input matrix, shape(state_size, input_size) Q (numpy.ndarray): cost function weight for states R (numpy.ndarray): cost function weight for inputs history_u (list[numpy.ndarray]): time history of optimal inputs Ref: <NAME>. (2002). Predictive control: with constraints. """<def_stmt>__init__ self config model<block_start>""" Args: config (ConfigModule): configuration module for the controller model (Model): linear state-space model providing the matrices A and B """<if_stmt>config.TYPE<ne>"Linear"<block_start><raise>ValueError("{} cannot be applied to \ this controller".format(model))<block_end>super(LinearMPC self).__init__(config model)<line_sep># system parameters self.model=model<line_sep>self.A=model.A<line_sep>self.B=model.B<line_sep>self.state_size=config.STATE_SIZE<line_sep>self.input_size=config.INPUT_SIZE<line_sep>self.pred_len=config.PRED_LEN<line_sep># get cost func self.state_cost_fn=config.state_cost_fn<line_sep>self.terminal_state_cost_fn=config.terminal_state_cost_fn<line_sep>self.input_cost_fn=config.input_cost_fn<line_sep># cost parameters self.Q=config.Q<line_sep>self.R=config.R<line_sep>self.Qs=<none><line_sep>self.Rs=<none><line_sep># constraints self.dt_input_lower_bound=config.DT_INPUT_LOWER_BOUND<line_sep>self.dt_input_upper_bound=config.DT_INPUT_UPPER_BOUND<line_sep>self.input_lower_bound=config.INPUT_LOWER_BOUND<line_sep>self.input_upper_bound=config.INPUT_UPPER_BOUND<line_sep># constraint matrices, built in setup() self.W=<none><line_sep>self.omega=<none><line_sep>self.F=<none><line_sep>self.f=<none><line_sep>self.setup()<line_sep>self.prev_sol=np.zeros(self.input_size<times>self.pred_len)<line_sep># history self.history_u=[np.zeros(self.input_size)]<block_end><def_stmt>setup self<block_start>""" set up Model Predictive Control as a quadratic program """<line_sep>A_factorials=[self.A]<line_sep>self.phi_mat=self.A.copy()<for_stmt>_ range(self.pred_len-1)<block_start>temp_mat=np.matmul(A_factorials[-1] self.A)<line_sep>self.phi_mat=np.vstack((self.phi_mat temp_mat))<line_sep>A_factorials.append(temp_mat)<block_end># these factorials are reused below self.gamma_mat=self.B.copy()<line_sep>gamma_mat_temp=self.B.copy()<for_stmt>i range(self.pred_len-1)<block_start>temp_1_mat=np.matmul(A_factorials[i] self.B)<line_sep>gamma_mat_temp=temp_1_mat+gamma_mat_temp<line_sep>self.gamma_mat=np.vstack((self.gamma_mat gamma_mat_temp))<block_end>self.theta_mat=self.gamma_mat.copy()<for_stmt>i range(self.pred_len-1)<block_start>temp_mat=np.zeros_like(self.gamma_mat)<line_sep>temp_mat[int((i+1)<times>self.state_size): :]=self.gamma_mat[:-int((i+1)<times>self.state_size) :]<line_sep>self.theta_mat=np.hstack((self.theta_mat temp_mat))<block_end># evaluation function weight diag_Qs=np.tile(np.diag(self.Q) self.pred_len)<line_sep>diag_Rs=np.tile(np.diag(self.R) self.pred_len)<line_sep>self.Qs=np.diag(diag_Qs)<line_sep>self.Rs=np.diag(diag_Rs)<line_sep># constraints # about inputs <if_stmt>self.input_lower_bound<is><not><none><block_start>self.F=np.zeros((self.input_size<times>2 self.pred_len<times>self.input_size))<for_stmt>i 
range(self.input_size)<block_start>self.F[i<times>2:(i+1)<times>2 i]=np.array([1. -1.])<line_sep>temp_F=self.F.copy()<block_end><for_stmt>i range(self.pred_len-1)<block_start><for_stmt>j range(self.input_size)<block_start>temp_F[j<times>2:(j+1)<times>2 ((i+1)<times>self.input_size)+j]=np.array([1. -1.])<block_end>self.F=np.vstack((self.F temp_F))<block_end>self.F1=self.F[: :self.input_size]<line_sep>temp_f=[]<for_stmt>i range(self.input_size)<block_start>temp_f.append(-1<times>self.input_upper_bound[i])<line_sep>temp_f.append(self.input_lower_bound[i])<block_end>self.f=np.tile(np.array(temp_f).flatten() self.pred_len)<block_end># about dt_input constraints <if_stmt>self.dt_input_lower_bound<is><not><none><block_start>self.W=np.zeros((2 self.pred_len<times>self.input_size))<line_sep>self.W[: 0]=np.array([1. -1.])<for_stmt>i range(self.pred_len<times>self.input_size-1)<block_start>temp_W=np.zeros((2 self.pred_len<times>self.input_size))<line_sep>temp_W[: i+1]=np.array([1. -1.])<line_sep>self.W=np.vstack((self.W temp_W))<block_end>temp_omega=[]<for_stmt>i range(self.input_size)<block_start>temp_omega.append(self.dt_input_upper_bound[i])<line_sep>temp_omega.append(-1.<times>self.dt_input_lower_bound[i])<block_end>self.omega=np.tile(np.array(temp_omega).flatten() self.pred_len)<block_end><block_end><def_stmt>obtain_sol self curr_x g_xs<block_start>""" calculate the optimal inputs Args: curr_x (numpy.ndarray): current state, shape(state_size, ) g_xs (numpy.ndarrya): goal trajectory, shape(plan_len+1, state_size) Returns: opt_input (numpy.ndarray): optimal input, shape(input_size, ) """<line_sep>temp_1=np.matmul(self.phi_mat curr_x.reshape(-1 1))<line_sep>temp_2=np.matmul(self.gamma_mat self.history_u[-1].reshape(-1 1))<line_sep>error=g_xs[1:].reshape(-1 1)-temp_1-temp_2<line_sep>G=np.matmul(self.theta_mat.T np.matmul(self.Qs error))<line_sep>H=np.matmul(self.theta_mat.T np.matmul(self.Qs self.theta_mat))+self.Rs<line_sep>H=H<times>0.5<line_sep># constraints A=[]<line_sep>b=[]<if_stmt>self.W<is><not><none><block_start>A.append(self.W)<line_sep>b.append(self.omega.reshape(-1 1))<block_end><if_stmt>self.F<is><not><none><block_start>b_F=-np.matmul(self.F1 self.history_u[-1].reshape(-1 1))-self.f.reshape(-1 1)<line_sep>A.append(self.F)<line_sep>b.append(b_F)<block_end>A=np.array(A).reshape(-1 self.input_size<times>self.pred_len)<line_sep>ub=np.array(b).flatten()<line_sep># using cvxopt <def_stmt>optimized_func dt_us<block_start><return>(np.dot(dt_us np.dot(H dt_us.reshape(-1 1)))-np.dot(G.T dt_us.reshape(-1 1)))[0]<block_end># constraint lb=np.array([-np.inf<for>_ range(len(ub))])# one side cons cons=LinearConstraint(A lb ub)<line_sep># solve opt_sol=minimize(optimized_func self.prev_sol.flatten() constraints=[cons])<line_sep>opt_dt_us=opt_sol.x<line_sep>""" using cvxopt ver, if you want to solve more quick please use cvxopt instead of scipy # make cvxpy problem formulation P = 2*matrix(H) q = matrix(-1 * G) A = matrix(A) b = matrix(ub) # solve the problem opt_sol = solvers.qp(P, q, G=A, h=b) opt_dt_us = np.array(list(opt_sol['x'])) """<line_sep># to dt form opt_dt_u_seq=np.cumsum(opt_dt_us.reshape(self.pred_len self.input_size) axis=0)<line_sep>self.prev_sol=opt_dt_u_seq.copy()<line_sep>opt_u_seq=opt_dt_u_seq+self.history_u[-1]<line_sep># save self.history_u.append(opt_u_seq[0])<line_sep># check costs costs=self.calc_cost(curr_x opt_u_seq.reshape(1 self.pred_len self.input_size) g_xs)<line_sep>logger.debug("Cost = {}".format(costs))<line_sep><return>opt_u_seq[0]<block_end><def_stmt>__str__ 
self<block_start><return>"LinearMPC"<block_end><block_end>
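# A hypothetical construction sketch (not part of the original file). It fabricates a
# double-integrator model and a config namespace carrying only the attributes LinearMPC
# reads above; it assumes the base Controller simply stores config and model, so treat
# it as an illustration rather than the library's documented setup path.
<import_from_stmt>types SimpleNamespace<line_sep>dummy_model=SimpleNamespace(A=np.array([[1. 0.1] [0. 1.]]) B=np.array([[0.] [0.1]]))<line_sep>dummy_config=SimpleNamespace(TYPE="Linear" STATE_SIZE=2 INPUT_SIZE=1 PRED_LEN=10 Q=np.eye(2) R=np.eye(1) state_cost_fn=<none> terminal_state_cost_fn=<none> input_cost_fn=<none> DT_INPUT_LOWER_BOUND=np.array([-0.5]) DT_INPUT_UPPER_BOUND=np.array([0.5]) INPUT_LOWER_BOUND=np.array([-1.]) INPUT_UPPER_BOUND=np.array([1.]))<line_sep>controller=LinearMPC(dummy_config dummy_model)<line_sep>print(controller.phi_mat.shape controller.theta_mat.shape)# prediction matrices stacked over the horizon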
<def_stmt>test <block_start><import_stmt>spacy.tokens<import_stmt>spacy.lang.en<assert_stmt>isinstance(nlp spacy.lang.en.English) "The nlp object should be an instance of the English class"<assert_stmt>isinstance(doc spacy.tokens.Doc) "Did you process the text with the nlp object to create a doc?"<assert_stmt>"print(doc.text)"<in>__solution__ "Did you print doc.text?"<line_sep>__msg__.good("Well done!")<block_end>
<import_stmt>os<import_from_stmt>types SimpleNamespace<line_sep>cfg=SimpleNamespace(**{})<line_sep># data path
cfg.data_dir="/workspace/data/ranzcr/"<line_sep>cfg.data_folder=cfg.data_dir+"train/"<line_sep>cfg.train_df=cfg.data_dir+"train_folds.csv"<line_sep>cfg.test_df=cfg.data_dir+"sample_submission.csv"<line_sep>cfg.output_dir="./output/weights/"<line_sep># dataset
cfg.batch_size=4<line_sep>cfg.img_size=(896 896)<line_sep>cfg.train_aug=<none><line_sep>cfg.val_aug=<none><line_sep>cfg.label_cols=["ETT - Abnormal" "ETT - Borderline" "ETT - Normal" "NGT - Abnormal" "NGT - Borderline" "NGT - Incompletely Imaged" "NGT - Normal" "CVC - Abnormal" "CVC - Borderline" "CVC - Normal" "Swan Ganz Catheter Present" ]<line_sep>cfg.num_classes=len(cfg.label_cols)<line_sep># mask
cfg.thickness=32<line_sep>cfg.seg_weight=50<line_sep># model
cfg.backbone="tf_efficientnet_b8_ap"<line_sep>cfg.pretrained=<true><line_sep>cfg.pretrained_weights=<none><line_sep>cfg.train=<true><line_sep>cfg.seg_dim=3<line_sep>cfg.image_extension=".jpg"<line_sep># training
cfg.fold=-1<line_sep>cfg.lr=1e-4<line_sep>cfg.weight_decay=0<line_sep>cfg.epochs=15<line_sep>cfg.seed=-1<line_sep>cfg.calc_loss=<true><line_sep>cfg.train_val=<false><line_sep>cfg.eval_epochs=1<line_sep>cfg.eval_train_epochs=20<line_sep>cfg.warmup=5<line_sep>cfg.compute_auc=<true><line_sep># resources
cfg.find_unused_parameters=<true><line_sep>cfg.mixed_precision=<true><line_sep>cfg.grad_accumulation=1<line_sep>cfg.gpu=0<line_sep>cfg.device="cuda:%d"%cfg.gpu<line_sep>cfg.num_workers=8<line_sep>cfg.drop_last=<true><line_sep>basic_cfg=cfg<line_sep>
<import_from_stmt>itertools chain<import_from_stmt>unittest.mock Mock<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>lhotse MonoCut SupervisionSegment<import_from_stmt>lhotse.cut PaddingCut<import_from_stmt>lhotse.supervision AlignmentItem<import_from_stmt>lhotse.utils LOG_EPSILON<class_stmt>TestMasksWithoutSupervisions<block_start><def_stmt>test_cut_audio_mask self<block_start>cut=MonoCut("cut" start=0 duration=2 channel=0 recording=Mock(sampling_rate=16000))<line_sep>mask=cut.supervisions_audio_mask()<assert_stmt>mask.sum()<eq>0<block_end><def_stmt>test_cut_features_mask self<block_start>cut=MonoCut("cut" start=0 duration=2 channel=0 features=Mock(sampling_rate=16000 frame_shift=0.01 num_frames=2000) )<line_sep>mask=cut.supervisions_feature_mask()<assert_stmt>mask.sum()<eq>0<block_end><def_stmt>test_padding_cut_audio_mask self<block_start>cut=PaddingCut("cut" duration=2 sampling_rate=16000 feat_value=LOG_EPSILON num_samples=32000 )<line_sep>mask=cut.supervisions_audio_mask()<assert_stmt>mask.sum()<eq>0<block_end><def_stmt>test_padding_cut_features_mask self<block_start>cut=PaddingCut("cut" duration=2 sampling_rate=16000 feat_value=LOG_EPSILON num_frames=2000 num_features=13 )<line_sep>mask=cut.supervisions_feature_mask()<assert_stmt>mask.sum()<eq>0<block_end><def_stmt>test_mixed_cut_audio_mask self<block_start>cut=MonoCut("cut" start=0 duration=2 channel=0 recording=Mock(sampling_rate=16000))<line_sep>mixed_cut=cut.append(cut)<line_sep>mask=mixed_cut.supervisions_audio_mask()<assert_stmt>mask.sum()<eq>0<block_end><def_stmt>test_mixed_cut_features_mask self<block_start>cut=MonoCut("cut" start=0 duration=2 channel=0 features=Mock(sampling_rate=16000 frame_shift=0.01) )<line_sep>mixed_cut=cut.append(cut)<line_sep>mask=mixed_cut.supervisions_feature_mask()<assert_stmt>mask.sum()<eq>0<block_end><block_end>@pytest.fixture<def_stmt>supervisions <block_start><return>[SupervisionSegment("sup" "rec" start=0 duration=0.5 speaker="SpkA" alignment={"word":[AlignmentItem(symbol="a" start=0 duration=0.1) AlignmentItem(symbol="b" start=0.2 duration=0.2) ]} ) SupervisionSegment("sup" "rec" start=0.6 duration=0.2 speaker="SpkB" alignment={"word":[AlignmentItem(symbol="a" start=0.6 duration=0.2) ]} ) ]<block_end><class_stmt>TestMasksWithSupervisions<block_start>@pytest.mark.parametrize("alignment" [<none> "word"])<def_stmt>test_cut_audio_mask self supervisions alignment<block_start>cut=MonoCut("cut" start=0 duration=2 channel=0 recording=Mock(sampling_rate=16000) supervisions=supervisions )<line_sep>mask=cut.supervisions_audio_mask(use_alignment_if_exists=alignment)<if_stmt>alignment<eq>"word"<block_start>ones=np.index_exp[list(chain(range(0 1600) range(3200 6400) range(9600 12800)))]<line_sep>zeros=np.index_exp[list(chain(range(1600 3200) range(6400 9600) range(12800 32000)))]<block_end><else_stmt><block_start>ones=np.index_exp[list(chain(range(0 8000) range(9600 12800)))]<line_sep>zeros=np.index_exp[list(chain(range(8000 9600) range(12800 32000)))]<block_end><assert_stmt>(mask[ones]<eq>1).all()<assert_stmt>(mask[zeros]<eq>0).all()<block_end>@pytest.mark.parametrize("alignment" [<none> "word"])<def_stmt>test_cut_features_mask self supervisions alignment<block_start>cut=MonoCut("cut" start=0 duration=2 channel=0 features=Mock(sampling_rate=16000 frame_shift=0.01 num_frames=2000) supervisions=supervisions )<line_sep>mask=cut.supervisions_feature_mask(use_alignment_if_exists=alignment)<if_stmt>alignment<eq>"word"<block_start>ones=np.index_exp[list(chain(range(0 10) range(20 40) range(60 
80)))]<line_sep>zeros=np.index_exp[list(chain(range(10 20) range(40 60) range(80 200)))]<block_end><else_stmt><block_start>ones=np.index_exp[list(chain(range(0 50) range(60 80)))]<line_sep>zeros=np.index_exp[list(chain(range(50 60) range(80 200)))]<block_end><assert_stmt>(mask[ones]<eq>1).all()<assert_stmt>(mask[zeros]<eq>0).all()<block_end>@pytest.mark.parametrize("alignment" [<none> "word"])<def_stmt>test_cut_speakers_audio_mask self supervisions alignment<block_start>cut=MonoCut("cut" start=0 duration=2 channel=0 recording=Mock(sampling_rate=16000) supervisions=supervisions )<line_sep>mask=cut.speakers_audio_mask(use_alignment_if_exists=alignment)<if_stmt>alignment<eq>"word"<block_start>ones=[np.index_exp[list(chain(range(0 1600) range(3200 6400)))] np.index_exp[list(chain(range(9600 12800)))] ]<line_sep>zeros=[np.index_exp[list(chain(range(1600 3200) range(6400 32000)))] np.index_exp[list(chain(range(0 9600) range(12800 32000)))] ]<block_end><else_stmt><block_start>ones=[np.index_exp[range(0 8000)] np.index_exp[range(9600 12800)]]<line_sep>zeros=[np.index_exp[list(chain(range(8000 32000)))] np.index_exp[list(chain(range(0 9600) range(12800 32000)))] ]<block_end><assert_stmt>(mask[0 ones[0]]<eq>1).all()<assert_stmt>(mask[1 ones[1]]<eq>1).all()<assert_stmt>(mask[0 zeros[0]]<eq>0).all()<assert_stmt>(mask[1 zeros[1]]<eq>0).all()<block_end>@pytest.mark.parametrize("alignment" [<none> "word"])<def_stmt>test_cut_speakers_features_mask self supervisions alignment<block_start>cut=MonoCut("cut" start=0 duration=2 channel=0 features=Mock(sampling_rate=16000 frame_shift=0.01 num_frames=2000) supervisions=supervisions )<line_sep>mask=cut.speakers_feature_mask(use_alignment_if_exists=alignment)<if_stmt>alignment<eq>"word"<block_start>ones=[np.index_exp[list(chain(range(0 10) range(20 40)))] np.index_exp[list(chain(range(60 80)))] ]<line_sep>zeros=[np.index_exp[list(chain(range(10 20) range(40 200)))] np.index_exp[list(chain(range(0 60) range(80 200)))] ]<block_end><else_stmt><block_start>ones=[np.index_exp[list(chain(range(0 50)))] np.index_exp[list(chain(range(60 80)))] ]<line_sep>zeros=[np.index_exp[list(chain(range(50 200)))] np.index_exp[list(chain(range(0 60) range(80 200)))] ]<block_end><assert_stmt>(mask[0 ones[0]]<eq>1).all()<assert_stmt>(mask[1 ones[1]]<eq>1).all()<assert_stmt>(mask[0 zeros[0]]<eq>0).all()<assert_stmt>(mask[1 zeros[1]]<eq>0).all()<block_end><def_stmt>test_mixed_cut_audio_mask self supervisions<block_start>cut=MonoCut("cut" start=0 duration=2 channel=0 recording=Mock(sampling_rate=16000) supervisions=supervisions )<line_sep>mixed_cut=cut.append(cut)<line_sep>mask=mixed_cut.supervisions_audio_mask()<line_sep>ones=np.index_exp[list(chain(range(0 8000) range(9600 12800) range(32000 40000) range(41600 44800) ))]<line_sep>zeros=np.index_exp[list(chain(range(8000 9600) range(12800 32000) range(40000 41600) range(44800 64000) ))]<assert_stmt>(mask[ones]<eq>1).all()<assert_stmt>(mask[zeros]<eq>0).all()<block_end><def_stmt>test_mixed_cut_features_mask self supervisions<block_start>cut=MonoCut("cut" start=0 duration=2 channel=0 features=Mock(sampling_rate=16000 frame_shift=0.01) supervisions=supervisions )<line_sep>mixed_cut=cut.append(cut)<line_sep>mask=mixed_cut.supervisions_feature_mask()<line_sep>ones=np.index_exp[list(chain(range(0 50) range(60 80) range(200 250) range(260 280)))]<line_sep>zeros=np.index_exp[list(chain(range(50 60) range(80 200) range(250 260) range(280 400)))]<assert_stmt>(mask[ones]<eq>1).all()<assert_stmt>(mask[zeros]<eq>0).all()<block_end><block_end>
<import_stmt>argparse<import_stmt>os<import_stmt>typing<import_stmt>functools<line_sep>parser=argparse.ArgumentParser(description='Convert tsv to lu')<line_sep>parser.add_argument('file' type=str)<line_sep>parser.add_argument('-s' '--source' type=str default='custom editorial')<line_sep>parser.add_argument('-o' '--out' type=str default=<none>)<line_sep>args=parser.parse_args()<with_stmt>open(args.file 'r' encoding='utf-8')<as>fin# the first line is a header; skip it
<block_start>lines=fin.readlines()[1:]<block_end><class_stmt>Questions<block_start><def_stmt>__init__ self source:str metadata:str<block_start>self.questions=[]<line_sep>self.source=source<if_stmt>metadata<block_start>metadata=metadata.split(':')<line_sep>self.metadatas=[[metadata[0] metadata[1]]]<block_end><else_stmt><block_start>self.metadatas=[]<block_end><block_end><def_stmt>WriteToFile self fout:typing.IO answer:str<block_start><def_stmt>writeLine *args<block_start><for_stmt>arg args<block_start>fout.write(arg)<block_end>fout.write('\n')<block_end>writeLine('> Source: ' self.source)<line_sep>writeLine('## ? ' self.questions[0])<for_stmt>i range(1 len(self.questions))<block_start>writeLine('- ' self.questions[i])<block_end>writeLine()<if_stmt>self.metadatas<block_start>writeLine('**Filters:**')<for_stmt>metadata self.metadatas<block_start>writeLine('- {0} = {1}'.format(metadata[0] metadata[1]))<block_end>writeLine()<block_end>writeLine('```markdown')<line_sep>writeLine(answer)<line_sep>writeLine('```')<line_sep>writeLine()<block_end><block_end>answerToQuestions:typing.Dict[str Questions]={}<for_stmt>line lines<block_start>line=line.split('\t')<line_sep>question=line[0]<line_sep>answer=line[1]<line_sep>source=line[2].strip()<if>len(line)<ge>3<else>args.source<line_sep>metadata=line[3].strip()<if>len(line)<ge>4<else><none><line_sep>questions=answerToQuestions.setdefault(answer Questions(source metadata))<line_sep>questions.questions.append(question)<block_end>print('lines {0} answers {1} questions {2}'.format(len(lines) len(answerToQuestions) functools.reduce(<lambda>a b:len(a.questions)+len(b.questions)<if>isinstance(a Questions)<else>a+len(b.questions) answerToQuestions.values())))<with_stmt>open(args.out<if>args.out<else>args.file+'.qna' 'w' encoding='utf-8')<as>fout<block_start><for_stmt>k,v answerToQuestions.items()<block_start>v.WriteToFile(fout k)<block_end><block_end>
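# A small self-contained demo (not part of the original script): it shows the .qna
# block that WriteToFile emits for one answer with two question variants; the texts
# below are made up for illustration.
<import_stmt>io<line_sep>demo=Questions('custom editorial' 'topic:billing')<line_sep>demo.questions.extend(['How do I get a refund?' 'Can I return my order?'])<line_sep>buf=io.StringIO()<line_sep>demo.WriteToFile(buf 'Refunds are processed within 5 business days.')<line_sep>print(buf.getvalue())<line_sep>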
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license. # See LICENSE in the project root for license information. <import_from_stmt>falcon HTTPNotFound<import_from_stmt>...auth login_required check_team_auth<import_from_stmt>... db<import_from_stmt>...utils unsubscribe_notifications create_audit<import_from_stmt>...constants ADMIN_DELETED<line_sep>@login_required<def_stmt>on_delete req resp team user<block_start>""" Delete team admin user. Removes admin from the team if he/she is not a member of any roster. **Example request:** .. sourcecode:: http DELETE /api/v0/teams/team-foo/admins/jdoe HTTP/1.1 :statuscode 200: Successful delete :statuscode 404: Team admin not found """<line_sep>check_team_auth(team req)<line_sep>connection=db.connect()<line_sep>cursor=connection.cursor()<line_sep>cursor.execute('''DELETE FROM `team_admin` WHERE `team_id`=(SELECT `id` FROM `team` WHERE `name`=%s) AND `user_id`=(SELECT `id` FROM `user` WHERE `name`=%s)''' (team user))<line_sep>deleted=cursor.rowcount<if_stmt>deleted<eq>0<block_start><raise>HTTPNotFound()<block_end>create_audit({'user':user} team ADMIN_DELETED req cursor)<line_sep># Remove user from the team if needed query='''DELETE FROM `team_user` WHERE `user_id` = (SELECT `id` FROM `user` WHERE `name`=%s) AND `user_id` NOT IN (SELECT `roster_user`.`user_id` FROM `roster_user` JOIN `roster` ON `roster`.`id` = `roster_user`.`roster_id` WHERE team_id = (SELECT `id` FROM `team` WHERE `name`=%s) UNION (SELECT `user_id` FROM `team_admin` WHERE `team_id` = (SELECT `id` FROM `team` WHERE `name`=%s))) AND `team_user`.`team_id` = (SELECT `id` FROM `team` WHERE `name` = %s)'''<line_sep>cursor.execute(query (user team team team))<if_stmt>cursor.rowcount<ne>0<block_start>unsubscribe_notifications(team user cursor)<block_end>connection.commit()<line_sep>cursor.close()<line_sep>connection.close()<block_end>
# # Copyright 2019 <NAME> <<EMAIL>> # # This file is part of Salus # (see https://github.com/SymbioticLab/Salus). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_from_stmt>collections defaultdict<import_stmt>re<import_stmt>pandas<import_stmt>plotutils<as>pu<def_stmt>check_threadpool path name='Threadpool'<block_start>pat=re.compile(name+r' (?P<evt>start|end) to run seq (?P<seq>\d+)')<with_stmt>open(path)<as>f<block_start>lines=f.readlines()<block_end>evts=[pat.search(line).groups()<for>line lines<if>pat.search(line)]<line_sep>print('evts num: {}'.format(len(evts)))<line_sep>r=set()<for_stmt>evt,seq evts<block_start><if_stmt>evt<eq>'start'<block_start>r.add(seq)<block_end><else_stmt><block_start>r.remove(seq)<block_end><block_end><return>r<block_end><def_stmt>check_pending_ops path<block_start>kernels=defaultdict(int)<line_sep>lines=defaultdict(list)<line_sep>kernel_type={}<line_sep>ptn_st=re.compile(r'''Process node: (?P<node>[^ \[]+) = (?P<kernel>[^\[]+)''')<line_sep>ptn_ed=re.compile("Propagate outputs for node: (?P<node>.+)")<with_stmt>open(path)<as>f<block_start><for_stmt>line f<block_start>line=line.rstrip('\n')<line_sep>m=ptn_st.search(line)<if_stmt>m<block_start>kernels[m.group('node')]<augadd>1<line_sep>lines[m.group('node')].append(line)<line_sep>kernel_type[m.group('node')]=m.group('kernel')<block_end>m=ptn_ed.search(line)<if_stmt>m<block_start><if_stmt>kernels[m.group('node')]<eq>0<block_start><raise>ValueError("Unknown kernel name: " m.group('node') line)<block_end>kernels[m.group('node')]<augsub>1<block_end><block_end><block_end>remaining=[('{}[{}]'.format(k kernel_type[k]) v)<for>k,v kernels.items()<if>v<ne>0]<line_sep>print(remaining)<line_sep><return>remaining kernel_type lines<block_end><def_stmt>check_kernel_create path<block_start>kernels={}<line_sep>ptn_create=re.compile(r'''Created kernel: (?P<kernel>\w+) (?P<op>.+)''')<line_sep>ptn_find=re.compile(r'''Found cached kernel: (?P<kernel>\w+) (?P<op>.+)''')<line_sep>ptn_delete=re.compile(r'''Deleted kernel: (?P<kernel>\w+) (?P<op>.+)''')<with_stmt>open(path)<as>f<block_start><for_stmt>line f<block_start>line=line.rstrip('\n')<line_sep>m=ptn_create.search(line)<if_stmt>m<block_start>kernels[m.group('kernel')]=m.group('op')<block_end>m=ptn_find.search(line)<if_stmt>m<block_start>addr=m.group('kernel')<if_stmt>addr<not><in>kernels<block_start><raise>ValueError('Found nonexist kernel: ' addr m.group('op'))<block_end><if_stmt>kernels[addr]<ne>m.group('op')<block_start><raise>ValueError('Found kernel changed op: ' addr kernels[addr] m.group('op'))<block_end><block_end>m=ptn_delete.search(line)<if_stmt>m<block_start>addr=m.group('kernel')<if_stmt>addr<not><in>kernels<block_start><raise>ValueError('Delete nonexist kernel: ' addr m.group('op'))<block_end><if_stmt>kernels[addr]<ne>m.group('op')<block_start><raise>ValueError('Delete kernel changed op: ' addr kernels[addr] m.group('op'))<block_end><del_stmt>kernels[addr]<block_end><block_end><block_end><return>kernels<block_end><def_stmt>check_iter_create 
path<block_start>iters=defaultdict(list)<line_sep>ptn_create=re.compile(r'''Created iteration (?P<graphId>\d+) for graph (?P<gh>\w+) in session (?P<sess>\w+)''')<line_sep>ptn_running=re.compile(r'''Running iteration (?P<sess>\w+):(?P<graphId>\d+)''')<line_sep>ptn_finish=re.compile(r'''(?P<sess>\w+):(?P<gh>\w+):(?P<graphId>\d+) finish iteration''')<with_stmt>open(path)<as>f<block_start><for_stmt>line f<block_start>line=line.rstrip('\n')<line_sep>m=ptn_create.search(line)<if_stmt>m<block_start>l=iters[m.group('graphId')]<if_stmt>l<and>l[-1]<ne>2<block_start>print('Iteration {} created while it is running'.format(m.group('graphId')))<block_end>l.append(0)<block_end>m=ptn_running.search(line)<if_stmt>m<block_start>l=iters[m.group('graphId')]<if_stmt>l<and>l[-1]<ne>0<block_start>print('Iteration {} running while it is not created'.format(m.group('graphId')))<block_end>l.append(1)<block_end>m=ptn_finish.search(line)<if_stmt>m<block_start>l=iters[m.group('graphId')]<if_stmt>l<and>l[-1]<ne>1<block_start>print('Iteration {} stopped while it is not running'.format(m.group('graphId')))<block_end>l.append(2)<block_end><block_end><block_end><return>iters<block_end><def_stmt>check_part_nodes path<block_start>nodes=defaultdict(list)<line_sep># Node 1 in graphHandle=0000000000000001, graphId=1: _SINK = NoOp[] ptn_node=re.compile(r'''Node (?P<nid>\d+) in graphHandle=(?P<gh>\w+), graphId=(?P<graphId>\d+): (?P<name>[\w/_]+) = (?P<kernel>[^[]+)''')<with_stmt>open(path)<as>f<block_start><for_stmt>line f<block_start>line=line.rstrip('\n')<line_sep>m=ptn_node.search(line)<if_stmt>m<block_start>nodes[m.group('graphId')].append({'nid':int(m.group('nid')) 'name':m.group('name') 'kernel':m.group('kernel') 'gh':m.group('gh')})<block_end><block_end><block_end><return>nodes<block_end><def_stmt>check_mem_alloc path<block_start>allocs={}<line_sep># TFAllocator called for attributes tensorflow::AllocationAttributes(allocation_will_be_logged=1, no_retry_on_failure=1) of 4194304 bytes of memory at 0x1023e200000 with alignment 32 using allocator GPU_0_smallopt@0x10d9c1b0 with AllocationTicket(2988, device=GPU:0, sess=1e0e80dcd81c1d05) ptn_alloc=re.compile(r'''^.*TFAllocator called .* of (?P<size>\d+) bytes of memory at (?P<origin>0x[a-f0-9]+) with.*sess=(?P<sess>\w+)\)$''')<line_sep># [I] TFAllocator deallocating memory at 0x1021c1a4500 size 37632 using allocator GPU_0_bfc@0x11c82000 with AllocationTicket(3854, device=GPU:0, sess=4f03d23010531445) ptn_dealloc=re.compile(r'''^.*TFAllocator deallocating memory at (?P<origin>\w+) size (?P<size>\d+) using.*sess=(?P<sess>\w+)\)$''')<with_stmt>pu.pbopen(path)<as>f<block_start><for_stmt>line f<block_start>line=line.rstrip('\n')<line_sep>m=ptn_alloc.search(line)<if_stmt>m<block_start>origin=int(m.group('origin') 16)<line_sep>size=int(m.group('size'))<line_sep>allocs[origin]={'origin':origin 'size':size 'sess':m.group('sess')}<block_end>m=ptn_dealloc.search(line)<if_stmt>m<block_start>origin=int(m.group('origin') 16)<line_sep>size=int(m.group('size'))<if_stmt>origin<not><in>allocs<block_start><raise>ValueError('Unknown deallocation: '+line)<block_end><if_stmt>allocs[origin]['size']<ne>size<block_start><raise>ValueError('Mismatch size'+line)<block_end><del_stmt>allocs[origin]<block_end><block_end><block_end><return>allocs<block_end>
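# A hypothetical invocation sketch: 'exec.output' below stands in for a real Salus
# execution log; each checker parses the log and reports work that was started but
# never finished.
leftover_seqs=check_threadpool('exec.output')<line_sep>remaining,kernel_type,lines=check_pending_ops('exec.output')<line_sep>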
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. <import_stmt>time<class_stmt>BenchmarkTimer(object)<block_start><def_stmt>__init__ self<block_start>self.start_timer_step=0<line_sep>self.end_timer_step=100001<line_sep>self.cur_step=0<line_sep>self.total_time=0.0<line_sep>self.step_start=0.0<block_end><def_stmt>set_start_step self step<block_start>self.start_timer_step=step<block_end><def_stmt>time_begin self<block_start>self.cur_step<augadd>1<if_stmt>self.cur_step<g>self.start_timer_step<block_start>self.step_start=time.time()<block_end><block_end><def_stmt>time_end self<block_start><if_stmt>self.cur_step<g>self.start_timer_step<block_start>end=time.time()<line_sep>self.total_time<augadd>end-self.step_start<block_end><block_end><def_stmt>time_per_step self<block_start><if_stmt>self.cur_step<le>self.start_timer_step<block_start><return>0.0<block_end><return>self.total_time/(self.cur_step-self.start_timer_step)<block_end><block_end>
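# A minimal usage sketch (not part of the original file): skip the first few warm-up
# iterations, time the rest, and read back the average seconds per step.
timer=BenchmarkTimer()<line_sep>timer.set_start_step(5)<for_stmt>_ range(100)<block_start>timer.time_begin()<line_sep>time.sleep(0.001)# stand-in for one training step
timer.time_end()<block_end>print("avg seconds/step:" timer.time_per_step())<line_sep>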
<import_from_stmt>project branch<import_from_stmt>project hello<if_stmt>__name__<eq>'__main__'<block_start>hello()<line_sep>branch(<false> <true>)<line_sep>branch(<true> <true>)<block_end>
# -*- coding: utf-8 -*- # # This file is part of PyBuilder # # Copyright 2011-2020 PyBuilder Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>unittest<import_from_stmt>shutil rmtree<import_from_stmt>tempfile mkdtemp<import_from_stmt>pybuilder.python_utils iglob makedirs<import_from_stmt>pybuilder.utils jp<class_stmt>TestPythonGlobTest(unittest.TestCase)<block_start><def_stmt>touch self f<block_start><with_stmt>open(f "wb")<as>f<block_start><pass><block_end><block_end><def_stmt>setUp self<block_start>self.tmp_dir=mkdtemp()<line_sep>makedirs(jp(self.tmp_dir "a" "b"))<line_sep>self.touch(jp(self.tmp_dir "x.py"))<line_sep>self.touch(jp(self.tmp_dir "a" "y.py"))<line_sep>self.touch(jp(self.tmp_dir "a" "b" "z.py"))<block_end><def_stmt>tearDown self<block_start>rmtree(self.tmp_dir)<block_end><def_stmt>test_iglob self<block_start>self.assertEqual(list(iglob(jp(self.tmp_dir "*.py"))) [jp(self.tmp_dir "x.py")])<line_sep>self.assertEqual(list(iglob(jp(self.tmp_dir "**" "*.py") recursive=<true>)) [jp(self.tmp_dir "x.py") jp(self.tmp_dir "a" "y.py") jp(self.tmp_dir "a" "b" "z.py")])<block_end><block_end>
<import_stmt>random<import_stmt>numpy<as>np<import_stmt>os<import_stmt>pytest<import_from_stmt>ding.utils.plot_helper plot<line_sep>@pytest.mark.unittest<def_stmt>test_plot <block_start>rewards1=np.array([0 0.1 0 0.2 0.4 0.5 0.6 0.9 0.9 0.9])<line_sep>rewards2=np.array([0 0 0.1 0.4 0.5 0.5 0.55 0.8 0.9 1])<line_sep>rewards=np.concatenate((rewards1 rewards2))# concatenate the reward arrays
episode1=range(len(rewards1))<line_sep>episode2=range(len(rewards2))<line_sep>episode=np.concatenate((episode1 episode2))<line_sep>data1={}<line_sep>data1['x']=episode<line_sep>data1['y']=rewards<line_sep>data1['label']='line1'<line_sep>rewards3=np.random.random(10)<line_sep>rewards4=np.random.random(10)<line_sep>rewards=np.concatenate((rewards3 rewards4))# concatenate the reward arrays
episode3=range(len(rewards3))<line_sep>episode4=range(len(rewards4))<line_sep>episode=np.concatenate((episode3 episode4))<line_sep>data2={}<line_sep>data2['x']=episode<line_sep>data2['y']=rewards<line_sep>data2['label']='line2'<line_sep>data=[data1 data2]<line_sep>plot(data 'step' 'reward_rate' 'test_pic' './pic.jpg')<assert_stmt>os.path.exists('./pic.jpg')<block_end>
#encoding:utf-8 subreddit='chessmemes'<line_sep>t_channel='@chessmemesenglish'<def_stmt>send_post submission r2t<block_start><return>r2t.send_simple(submission)<block_end>
<import_from_stmt>.. BaseApi NamedEndpoint<import_from_stmt>.urls ClashApiV1Urls<class_stmt>ClashApiV1(NamedEndpoint)<block_start>""" This class wraps the Clash-v1 endpoint calls provided by the Riot API. See https://developer.riotgames.com/apis#clash-v1 for more detailed information """<def_stmt>__init__ self base_api:BaseApi<block_start>""" Initialize a new ClashApiV1 which uses the provided base_api :param BaseApi base_api: the root API object to use for making all requests. """<line_sep>super().__init__(base_api self.__class__.__name__)<block_end><def_stmt>by_summoner self region:str summoner_id:str<block_start>""" This endpoint returns a list of active Clash players for a given summoner ID. If a summoner registers for multiple tournaments at the same time (e.g., Saturday and Sunday) then both registrations would appear in this list. :param string region: The region to execute this request on. :param string summoner_id: The summoner ID. :returns: List[PlayerDTO]: represents the summoner's info for the current clash. """<line_sep><return>self._request_endpoint(self.by_summoner.__name__ region ClashApiV1Urls.by_summoner summoner_id=summoner_id )<block_end><def_stmt>by_team self region:str team_id:str<block_start>""" Get team by ID. :param string region: The region to execute this request on :param string team_id: Team ID :returns: TeamDTO: represents a clash team """<line_sep><return>self._request_endpoint(self.by_team.__name__ region ClashApiV1Urls.by_team team_id=team_id )<block_end><def_stmt>tournaments self region:str<block_start>""" Returns a list of active and upcoming tournaments. :param string region: The region to execute this request on :returns: List[TournamentDTO]: represents all of the current tournaments active """<line_sep><return>self._request_endpoint(self.tournaments.__name__ region ClashApiV1Urls.tournaments )<block_end><def_stmt>tournament_by_team self region:str team_id:str<block_start>""" Get tournament by team ID. :param string region: The region to execute this request on :param string team_id: Team ID :returns: TournamentDTO: represents a clash tournament """<line_sep><return>self._request_endpoint(self.tournament_by_team.__name__ region ClashApiV1Urls.tournament_by_team team_id=team_id )<block_end><def_stmt>by_tournament self region:str tournament_id:str<block_start>""" Get tournament by ID. :param string region: The region to execute this request on :param string tournament_id: Tournament ID :returns: TournamentDTO: represents a clash tournament """<line_sep><return>self._request_endpoint(self.by_tournament.__name__ region ClashApiV1Urls.by_tournament tournament_id=tournament_id )<block_end><block_end>
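# A hedged usage sketch (not part of the original module): `base_api` is assumed to be
# an already-configured BaseApi instance carrying your Riot API key; region strings such
# as 'na1' follow Riot's routing conventions.
clash=ClashApiV1(base_api)<line_sep>active_tournaments=clash.tournaments('na1')<line_sep>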
<def_stmt>enabled <block_start><return><true><block_end><def_stmt>title <block_start><return>"Ctrl+Alt+Del"<block_end><def_stmt>subtitle <block_start><return>"View the latest Ctrl+Alt+Del strip"<block_end><def_stmt>run <block_start><import_stmt>os<import_stmt>re<line_sep>content=os.popen("""curl -s http://www.cad-comic.com/cad/""").read().rstrip()<line_sep>strip=re.match(r'.*?src="(http://v.cdn.cad-comic.com/comics/cad.*?)"' content re.IGNORECASE|re.S).groups(0)[0]<line_sep>os.system('curl -s '+strip+' -o strip.png')<line_sep>os.system('qlmanage -p strip.png')<block_end>
<import_from_stmt>office365.runtime.client_path ClientPath<class_stmt>ResourcePathUrl(ClientPath)<block_start>"""Resource path for OneDrive path-based addressing"""<def_stmt>__init__ self rel_url parent<block_start>""" :param str rel_url: File or Folder relative url :type parent: office365.runtime.client_path.ClientPath """<line_sep>super(ResourcePathUrl self).__init__(parent)<line_sep>self._url=rel_url<line_sep>self._nested=<false><block_end>@property<def_stmt>segments self<block_start>delimiter="/"<if>self._nested<else>":/"<if_stmt>isinstance(self.parent ResourcePathUrl)<block_start>self.parent._nested=<true><line_sep><return>[self._url delimiter]<block_end><else_stmt><block_start><return>[self.delimiter self._url delimiter]<block_end><block_end>@property<def_stmt>delimiter self<block_start><return>":/"<block_end>@property<def_stmt>name self<block_start><return>self._url<block_end><block_end>
<import_from_stmt>simple_salesforce.exceptions SalesforceMalformedRequest<import_from_stmt>cumulusci.core.tasks BaseSalesforceTask<import_from_stmt>cumulusci.core.utils process_bool_arg<import_from_stmt>cumulusci.tasks.salesforce.BaseSalesforceApiTask BaseSalesforceApiTask<class_stmt>CheckMyDomainActive(BaseSalesforceTask)<block_start><def_stmt>_run_task self<block_start>self.return_values=(".my."<in>self.org_config.instance_url<or>".cloudforce.com"<in>self.org_config.instance_url)<line_sep>self.logger.info(f"Completed My Domain preflight check with result {self.return_values}")<block_end><block_end><class_stmt>CheckSettingsValue(BaseSalesforceApiTask)<block_start>task_options={"settings_type":{"description":"The API name of the Settings entity to be checked, such as ChatterSettings." "required":<true> } "settings_field":{"description":"The API name of the field on the Settings entity to check." "required":<true> } "value":{"description":"The value to check for" "required":<true>} "treat_missing_as_failure":{"description":"If True, treat a missing Settings entity as a preflight failure, instead of raising an exception. Defaults to False." "required":<false> } }<def_stmt>_run_task self<block_start>field=self.options["settings_field"]<line_sep>entity=self.options["settings_type"]<try_stmt><block_start>results=self.tooling.query(f"SELECT {field} FROM {entity}")["records"]<block_end><except_stmt>SalesforceMalformedRequest<as>e<block_start>self.logger.error(f"The settings value {entity}.{field} could not be queried: {e}")<line_sep>self.return_values=<false><if_stmt><not>process_bool_arg(self.options.get("treat_missing_as_failure" <false>))<block_start><raise>e<block_end><return><block_end><if_stmt><not>results<block_start>self.logger.info("Located no Settings records. Returning negative preflight result.")<line_sep>self.return_values=<false><line_sep><return><block_end>value=results[0].get(self.options["settings_field"])<line_sep># Type-sensitive compare. <if_stmt>type(value)<is>bool<block_start>comparand=process_bool_arg(self.options["value"])<block_end><elif_stmt>type(value)<is>float<block_start>comparand=float(self.options["value"])<block_end><elif_stmt>type(value)<is>int<block_start>comparand=int(self.options["value"])<block_end><else_stmt><block_start>comparand=self.options["value"]<block_end>self.return_values=value<eq>comparand<line_sep>self.logger.info(f"Completed Settings preflight check with result {self.return_values}")<block_end><block_end>
<import_stmt>statistics<import_from_stmt>..base_population BasePopulation<class_stmt>CoDeepNEATPopulation(BasePopulation)<block_start>""" Population class of the CoDeepNEAT algorithm that holds all relevant population information in a single place to ease summary, serialization and deserialization. """<def_stmt>__init__ self initial_state=<none><block_start>""" Initializes all variables of a CoDeepNEAT population either to None/default values or to an initial state if such is supplied (usually when deserializing population) @param initial_state: dict object holding keys and values to all population variables """<line_sep># Declare internal variables of the CoDeepNEAT population self.generation_counter=<none><line_sep>self.best_genome=<none><line_sep>self.best_fitness=<none><line_sep># Declare and initialize internal variables concerning the module population of the CoDeepNEAT algorithm self.modules=dict()<line_sep>self.mod_species=dict()<line_sep>self.mod_species_repr=dict()<line_sep>self.mod_species_fitness_history=dict()<line_sep>self.mod_species_counter=0<line_sep># Declare and initialize internal variables concerning the blueprint population of the CoDeepNEAT algorithm self.blueprints=dict()<line_sep>self.bp_species=dict()<line_sep>self.bp_species_repr=dict()<line_sep>self.bp_species_fitness_history=dict()<line_sep>self.bp_species_counter=0<line_sep># If an initial state is supplied, then the population was deserialized. Recreate this initial state. <if_stmt>initial_state<is><not><none><block_start>self.generation_counter=initial_state['generation_counter']<line_sep>self.best_genome=initial_state['best_genome']<line_sep>self.best_fitness=initial_state['best_fitness']<line_sep>self.modules=initial_state['modules']<line_sep>self.mod_species=initial_state['mod_species']<line_sep>self.mod_species_repr=initial_state['mod_species_repr']<line_sep>self.mod_species_fitness_history=initial_state['mod_species_fitness_history']<line_sep>self.mod_species_counter=initial_state['mod_species_counter']<line_sep>self.blueprints=initial_state['blueprints']<line_sep>self.bp_species=initial_state['bp_species']<line_sep>self.bp_species_repr=initial_state['bp_species_repr']<line_sep>self.bp_species_fitness_history=initial_state['bp_species_fitness_history']<line_sep>self.bp_species_counter=initial_state['bp_species_counter']<block_end><block_end><def_stmt>summarize_population self<block_start>""" Prints the current state of all CoDeepNEAT population variables to stdout in a formatted and clear manner """<line_sep># Determine average fitness of all blueprints bp_fitness_list=[self.blueprints[bp_id].get_fitness()<for>bp_id self.blueprints]<line_sep>blueprints_avg_fitness=round(statistics.mean(bp_fitness_list) 4)<line_sep># Determine best id of each blueprint species bp_species_best_id=dict()<for_stmt>spec_id,spec_bp_ids self.bp_species.items()<block_start>spec_bp_ids_sorted=sorted(spec_bp_ids key=<lambda>x:self.blueprints[x].get_fitness() reverse=<true>)<line_sep>bp_species_best_id[spec_id]=spec_bp_ids_sorted[0]<block_end># Determine average fitness of all modules mod_fitness_list=[self.modules[mod_id].get_fitness()<for>mod_id self.modules]<line_sep>modules_avg_fitness=round(statistics.mean(mod_fitness_list) 4)<line_sep># Determine best id of each module species mod_species_best_id=dict()<for_stmt>spec_id,spec_mod_ids self.mod_species.items()<block_start>spec_mod_ids_sorted=sorted(spec_mod_ids key=<lambda>x:self.modules[x].get_fitness() 
reverse=<true>)<line_sep>mod_species_best_id[spec_id]=spec_mod_ids_sorted[0]<block_end># Print summary header
print("\n\n\n\033[1m{} Population Summary {}\n\n"<concat>"Generation: {:>4} || Best Genome Fitness: {:>8} || Avg Blueprint Fitness: {:>8} || "<concat>"Avg Module Fitness: {:>8}\033[0m\n"<concat>"Best Genome: {}\n".format('#'<times>60 '#'<times>60 self.generation_counter self.best_fitness blueprints_avg_fitness modules_avg_fitness self.best_genome))<line_sep># Print summary of blueprint species
print("\033[1mBlueprint Species || Blueprint Species Avg Fitness || Blueprint Species Size\033[0m")<for_stmt>spec_id,spec_fitness_history self.bp_species_fitness_history.items()<block_start>print("{:>6} || {:>8} || {:>8}".format(spec_id spec_fitness_history[self.generation_counter] len(self.bp_species[spec_id])))<line_sep>print(f"Best BP of Species {spec_id} || {self.blueprints[bp_species_best_id[spec_id]]}")<block_end># Print summary of module species
print("\n\033[1mModule Species || Module Species Avg Fitness || Module Species Size\033[0m")<for_stmt>spec_id,spec_fitness_history self.mod_species_fitness_history.items()<block_start>print("{:>6} || {:>8} || {:>8}".format(spec_id spec_fitness_history[self.generation_counter] len(self.mod_species[spec_id])))<line_sep>print(f"Best Mod of Species {spec_id} || {self.modules[mod_species_best_id[spec_id]]}")<block_end># Print summary footer
print("\n\033[1m"+'#'<times>142+"\033[0m\n")<block_end><def_stmt>serialize self<arrow>dict<block_start>""" Serializes all CoDeepNEAT population variables to a json compatible dictionary and returns it
        @return: serialized population variables as a json compatible dict
        """<line_sep># Serialize all modules
serialized_modules=dict()<for_stmt>mod_id,module self.modules.items()<block_start>serialized_modules[mod_id]=module.serialize()<block_end># Serialize all blueprints
serialized_blueprints=dict()<for_stmt>bp_id,blueprint self.blueprints.items()<block_start>serialized_blueprints[bp_id]=blueprint.serialize()<block_end># Take the serialized module and blueprint populations and extend them with the population's internal evolution information
serialized_population={'population_type':'CoDeepNEAT' 'generation_counter':self.generation_counter 'modules':serialized_modules 'mod_species':self.mod_species 'mod_species_repr':self.mod_species_repr<if>self.mod_species_repr<else><none> 'mod_species_fitness_history':self.mod_species_fitness_history 'mod_species_counter':self.mod_species_counter 'blueprints':serialized_blueprints 'bp_species':self.bp_species 'bp_species_repr':self.bp_species_repr<if>self.bp_species_repr<else><none> 'bp_species_fitness_history':self.bp_species_fitness_history 'bp_species_counter':self.bp_species_counter 'best_genome':self.best_genome.serialize() 'best_fitness':self.best_fitness}<line_sep><return>serialized_population<block_end><block_end>
<import_from_future_stmt> absolute_import<import_from_stmt>.triplet TripletLoss SoftTripletLoss<import_from_stmt>.crossentropy CrossEntropyLabelSmooth SoftEntropy<line_sep>__all__=['TripletLoss' 'CrossEntropyLabelSmooth' 'SoftTripletLoss' 'SoftEntropy']<line_sep>
<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>torch<import_from_stmt>probflow.distributions Deterministic<import_from_stmt>probflow.utils.torch_distributions get_TorchDeterministic<line_sep>tod=torch.distributions<def_stmt>is_close a b tol=1e-3<block_start><return>np.abs(a-b)<l>tol<block_end><def_stmt>test_TorchDeterministic <block_start>"""Tests the TorchDeterministic distribution"""<line_sep>TorchDeterministic=get_TorchDeterministic()<line_sep>dist=TorchDeterministic(loc=torch.tensor([2.0]) validate_args=<true>)<assert_stmt>is_close(dist.mean.numpy()[0] 2.0)<assert_stmt>is_close(dist.stddev 0.0)<assert_stmt>is_close(dist.variance 0.0)<line_sep>dist.expand([5 2])<line_sep>dist.rsample()<line_sep>dist.log_prob(torch.tensor([1.0]))<line_sep>dist.cdf(torch.tensor([1.0]))<line_sep>dist.icdf(torch.tensor([1.0]))<line_sep>dist.entropy()<block_end><def_stmt>test_Deterministic <block_start>"""Tests Deterministic distribution"""<line_sep># Create the distribution dist=Deterministic()<line_sep># Check default params <assert_stmt>dist.loc<eq>0<line_sep># Call should return backend obj <assert_stmt>isinstance(dist() tod.distribution.Distribution)<line_sep># Test methods <assert_stmt>dist.prob(torch.zeros([1])).numpy()<eq>1.0<assert_stmt>dist.prob(torch.ones([1])).numpy()<eq>0.0<assert_stmt>dist.log_prob(torch.zeros([1])).numpy()<eq>0.0<assert_stmt>dist.log_prob(torch.ones([1])).numpy()<eq>-np.inf<assert_stmt>dist.mean().numpy()<eq>0.0<line_sep># Test sampling samples=dist.sample()<assert_stmt>isinstance(samples torch.Tensor)<assert_stmt>samples.ndim<eq>0<line_sep>samples=dist.sample(10)<assert_stmt>isinstance(samples torch.Tensor)<assert_stmt>samples.ndim<eq>1<assert_stmt>samples.shape[0]<eq>10<line_sep>samples=dist.sample(torch.tensor([10]))<assert_stmt>isinstance(samples torch.Tensor)<assert_stmt>samples.ndim<eq>1<assert_stmt>samples.shape[0]<eq>10<line_sep># Should be able to set params dist=Deterministic(loc=2)<assert_stmt>dist.loc<eq>2<assert_stmt>dist.prob(2<times>torch.ones([1])).numpy()<eq>1.0<assert_stmt>dist.prob(torch.ones([1])).numpy()<eq>0.0<line_sep># But only with Tensor-like objs <with_stmt>pytest.raises(TypeError)<block_start>dist=Deterministic(loc="lalala")<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>pytest<import_from_stmt>pytoshop blending_range<def_stmt>_test_default_blending_range b<block_start><assert_stmt>b.black0<eq>0<assert_stmt>b.black1<eq>0<assert_stmt>b.white0<eq>0<assert_stmt>b.white1<eq>0<block_end><def_stmt>test_default_blending_range <block_start>b=blending_range.BlendingRange()<line_sep>_test_default_blending_range(b)<line_sep>b.black0=1<line_sep>b.black1=2<line_sep>b.white0=3<line_sep>b.white1=4<assert_stmt>b.black0<eq>1<assert_stmt>b.black1<eq>2<assert_stmt>b.white0<eq>3<assert_stmt>b.white1<eq>4<block_end><def_stmt>test_default_blending_range_pair <block_start>pair=blending_range.BlendingRangePair()<line_sep>_test_default_blending_range(pair.src)<line_sep>_test_default_blending_range(pair.dst)<with_stmt>pytest.raises(TypeError)<block_start>pair.src=<none><block_end><with_stmt>pytest.raises(TypeError)<block_start>pair.dst=<none><block_end>pair.src=pair.dst<block_end>
# Copyright (C) 2020-2021 Cancer Care Associates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>pathlib<import_from_stmt>pymedphys._imports streamlit<as>st<import_stmt>pymedphys<import_from_stmt>pymedphys._streamlit.utilities config<as>st_config<line_sep>HERE=pathlib.Path(__file__).parent<def_stmt>config_on_disk <block_start><return><none><block_end><def_stmt>config_in_current_directory <block_start><return>HERE<block_end>@st.cache<def_stmt>download_demo_files <block_start>cwd=pathlib.Path.cwd()<line_sep>pymedphys.zip_data_paths("metersetmap-gui-e2e-data.zip" extract_directory=cwd)<line_sep><return>cwd.joinpath("pymedphys-gui-demo")<block_end>CONFIG_OPTIONS={"Config on Disk":config_on_disk "File Upload/Download Only":config_in_current_directory "Demo Data":download_demo_files }<def_stmt>get_config config_mode<block_start>path=CONFIG_OPTIONS[config_mode]()<line_sep><return>st_config.get_config(path)<block_end>@st.cache<def_stmt>get_dicom_export_locations config<block_start>site_directories=st_config.get_site_directories(config)<line_sep>dicom_export_locations={site:directories["monaco"].parent.parent.joinpath("DCMXprtFile")<for>site,directories site_directories.items()}<line_sep><return>dicom_export_locations<block_end>@st.cache<def_stmt>get_icom_live_stream_directories config<block_start>icom_live_stream_directories={}<for_stmt>site config["site"]<block_start>icom_live_base_directory=pathlib.Path(site["export-directories"]["icom_live"])<for_stmt>linac site["linac"]<block_start>icom_live_stream_directories[linac["name"]]=str(icom_live_base_directory.joinpath(linac["ip"]))<block_end><block_end><return>icom_live_stream_directories<block_end>@st.cache<def_stmt>get_machine_centre_map config<block_start>machine_centre_map={}<for_stmt>site config["site"]<block_start><for_stmt>linac site["linac"]<block_start>machine_centre_map[linac["name"]]=site["name"]<block_end><block_end><return>machine_centre_map<block_end><def_stmt>_get_alias_with_fallback site_mosaiq_config<block_start><try_stmt><block_start><return>site_mosaiq_config["alias"]<block_end><except_stmt>KeyError<block_start><pass><block_end><try_stmt><block_start>port=site_mosaiq_config["port"]<block_end><except_stmt>KeyError<block_start>port=1433<block_end><return>f"{site_mosaiq_config['hostname']}:{port}"<block_end>@st.cache<def_stmt>get_mosaiq_details config<block_start>mosaiq_details={site["name"]:{"timezone":site["mosaiq"]["timezone"] "server":{"hostname":site["mosaiq"]["hostname"] "port":site["mosaiq"]["port"] "alias":_get_alias_with_fallback(site["mosaiq"]) } }<for>site config["site"]}<line_sep><return>mosaiq_details<block_end>@st.cache<def_stmt>get_default_icom_directories config<block_start>default_icom_directory=config["icom"]["patient_directories"]<line_sep><return>default_icom_directory<block_end>@st.cache<def_stmt>get_default_gamma_options config<block_start>default_gamma_options=config["gamma"]<line_sep><return>default_gamma_options<block_end>@st.cache<def_stmt>get_logfile_root_dir 
config<block_start>logfile_root_dir=pathlib.Path(config["trf_logfiles"]["root_directory"])<line_sep><return>logfile_root_dir<block_end>@st.cache<def_stmt>get_indexed_backups_directory config<block_start>logfile_root_dir=get_logfile_root_dir(config)<line_sep>indexed_backups_directory=logfile_root_dir.joinpath("diagnostics/already_indexed")<line_sep><return>indexed_backups_directory<block_end>@st.cache<def_stmt>get_indexed_trf_directory config<block_start>logfile_root_dir=get_logfile_root_dir(config)<line_sep>indexed_trf_directory=logfile_root_dir.joinpath("indexed")<line_sep><return>indexed_trf_directory<block_end><def_stmt>get_gamma_options config advanced_mode<block_start>default_gamma_options=get_default_gamma_options(config)<if_stmt>advanced_mode<block_start>st.sidebar.markdown(""" # Gamma parameters """)<line_sep>result={**default_gamma_options **{"dose_percent_threshold":st.sidebar.number_input("MU Percent Threshold" value=default_gamma_options["dose_percent_threshold"] ) "distance_mm_threshold":st.sidebar.number_input("Distance (mm) Threshold" value=default_gamma_options["distance_mm_threshold"] ) "local_gamma":st.sidebar.checkbox("Local Gamma" default_gamma_options["local_gamma"]) "max_gamma":st.sidebar.number_input("Max Gamma" value=default_gamma_options["max_gamma"]) } }<block_end><else_stmt><block_start>result=default_gamma_options<block_end><return>result<block_end>
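# A hypothetical end-to-end sketch (not part of the original module): pick the bundled
# demo data as the configuration source and read the default gamma parameters from it.
demo_config=get_config("Demo Data")<line_sep>print(get_default_gamma_options(demo_config))<line_sep>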
# $Id$ # # Copyright (C) 2003-2006 Rational Discovery LLC # # @@ All Rights Reserved @@ # This file is part of the RDKit. # The contents are covered by the terms of the BSD license # which is included in the file license.txt, found at the root # of the RDKit source tree. # <import_stmt>random<import_stmt>unittest<import_from_stmt>io StringIO<import_from_stmt>rdkit.DataStructs.TopNContainer TopNContainer _exampleCode<import_from_stmt>rdkit.TestRunner redirect_stdout<class_stmt>TestCase(unittest.TestCase)<block_start><def_stmt>test1 self# simple test with a known answer <block_start>cont=TopNContainer(4)<for_stmt>foo range(10)<block_start>cont.Insert(foo str(foo))<block_end><assert_stmt>cont.GetPts()<eq>list(range(6 10))<assert_stmt>cont.GetExtras()<eq>[str(x)<for>x range(6 10)]<block_end><def_stmt>test2 self# larger scale random test <block_start>cont=TopNContainer(50)<for_stmt>_ range(1000)<block_start>cont.Insert(random.random())<block_end>vs=cont.GetPts()<line_sep>last=vs.pop(0)<while_stmt>vs<block_start><assert_stmt>vs[0]<ge>last<line_sep>last=vs.pop(0)<block_end><block_end><def_stmt>test3 self# random test with extras <block_start>cont=TopNContainer(10)<for_stmt>_ range(100)<block_start>v=random.random()<line_sep>cont.Insert(v v+1)<block_end>vs=cont.GetExtras()<line_sep>last=vs.pop(0)<while_stmt>vs<block_start><assert_stmt>vs[0]<ge>last<line_sep>last=vs.pop(0)<block_end><block_end><def_stmt>test4 self# random test with extras and getitem <block_start>cont=TopNContainer(10)<for_stmt>i range(100)<block_start>v=random.random()<line_sep>cont.Insert(v v+1)<block_end>lastV,lastE=cont[0]<for_stmt>i range(1 len(cont))<block_start>v,e=cont[i]<assert_stmt>v<ge>lastV<assert_stmt>e<ge>lastE<line_sep>lastV,lastE=v e<block_end><block_end><def_stmt>test5 self# random test with extras and getitem, include reverse <block_start>cont=TopNContainer(10)<for_stmt>i range(100)<block_start>v=random.random()<line_sep>cont.Insert(v v+1)<block_end>cont.reverse()<line_sep>lastV,lastE=cont[0]<for_stmt>i range(1 len(cont))<block_start>v,e=cont[i]<assert_stmt>v<le>lastV<assert_stmt>e<le>lastE<line_sep>lastV,lastE=v e<block_end><block_end><def_stmt>test_keepAll self# simple test with a known answer where we keep all <block_start>cont=TopNContainer(-1)<for_stmt>i range(10)<block_start>cont.Insert(9-i str(9-i))<line_sep>self.assertEqual(len(cont) i+1)<block_end><assert_stmt>cont.GetPts()<eq>list(range(10))<assert_stmt>cont.GetExtras()<eq>[str(x)<for>x range(10)]<block_end><def_stmt>test_exampleCode self# We make sure that the example code runs <block_start>f=StringIO()<with_stmt>redirect_stdout(f)<block_start>_exampleCode()<block_end>s=f.getvalue()<line_sep>self.assertIn('[58, 75, 78, 84]' s)<block_end><block_end><if_stmt>__name__<eq>'__main__'# pragma: nocover <block_start>unittest.main()<block_end>
<import_from_stmt>pandas.api.types CategoricalDtype<import_from_stmt>sklearn.metrics mean_squared_error<import_stmt>argparse<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>json<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--library' choices=['h2o' 'lightgbm' 'sklearn' 'xgboost' 'catboost'] required=<true>)<line_sep>args=parser.parse_args()<line_sep># Load the data. path_train='data/boston_train.csv'<line_sep>path_test='data/boston_test.csv'<line_sep>target_column_name="medv"<line_sep>chas_options=["0" "1"]<line_sep>dtype={'crim':np.float64 'zn':np.float64 'indus':np.float64 'chas':CategoricalDtype(categories=chas_options) 'nox':np.float64 'rm':np.float64 'age':np.float64 'dis':np.float64 'rad':np.int64 'tax':np.float64 'ptratio':np.float64 'b':np.float64 'lstat':np.float64 }<line_sep>data_train=pd.read_csv(path_train dtype=dtype)<line_sep>data_test=pd.read_csv(path_test dtype=dtype)<if_stmt>args.library<eq>'xgboost'<or>args.library<eq>'sklearn'<or>args.library<eq>'catboost'<block_start>categorical_columns=data_train.select_dtypes(['category']).columns<line_sep>data_train.loc[: categorical_columns]=data_train.loc[: categorical_columns].apply(<lambda>x:x.cat.codes)<line_sep>data_test.loc[: categorical_columns]=data_test.loc[: categorical_columns].apply(<lambda>x:x.cat.codes)<block_end>labels_train=data_train.pop(target_column_name)<line_sep>features_train=data_train<line_sep>labels_test=data_test.pop(target_column_name)<line_sep>features_test=data_test<line_sep># Train the model. <if_stmt>args.library<eq>'h2o'<block_start><import_stmt>h2o<import_from_stmt>h2o.estimators H2OGradientBoostingEstimator<line_sep>h2o.init()<line_sep>data_train=pd.concat([features_train labels_train] axis=1)<line_sep>data_test=pd.concat([features_test labels_test] axis=1)<line_sep>data_train=h2o.H2OFrame(python_obj=data_train)<line_sep>data_test=h2o.H2OFrame(python_obj=data_test)<line_sep>feature_column_names=[column<for>column data_train.columns<if>column<ne>target_column_name]<line_sep>model=H2OGradientBoostingEstimator(distribution="gaussian" learn_rate=0.1 ntrees=100 )<line_sep>model.train(training_frame=data_train y=target_column_name x=feature_column_names )<block_end><elif_stmt>args.library<eq>'lightgbm'<block_start><import_stmt>lightgbm<as>lgb<line_sep>model=lgb.LGBMRegressor(learning_rate=0.1 n_estimators=100 num_leaves=255 )<line_sep>model.fit(features_train labels_train)<block_end><elif_stmt>args.library<eq>'sklearn'<block_start><import_from_stmt>sklearn.experimental enable_hist_gradient_boosting<import_from_stmt>sklearn.ensemble HistGradientBoostingRegressor<line_sep>model=HistGradientBoostingRegressor(learning_rate=0.1 max_iter=100 max_leaf_nodes=255 validation_fraction=<none> )<line_sep>model.fit(features_train labels_train)<block_end><elif_stmt>args.library<eq>'xgboost'<block_start><import_stmt>xgboost<as>xgb<line_sep>model=xgb.XGBRegressor(eta=0.1 eval_metric='logloss' grow_policy='lossguide' max_leaves=255 n_estimators=100 tree_method='hist' use_label_encoder=<false> )<line_sep>model.fit(features_train labels_train)<block_end><elif_stmt>args.library<eq>'catboost'<block_start><import_from_stmt>catboost CatBoostRegressor<line_sep>model=CatBoostRegressor(grow_policy='Lossguide' learning_rate=0.1 n_estimators=100 num_leaves=255 train_dir='data/catboost_info' verbose=<false>)<line_sep>model.fit(features_train labels_train silent=<true>)<block_end># Make predictions on the test data. 
<if_stmt>args.library<eq>'h2o'<block_start>predictions=model.predict(data_test).as_data_frame()<block_end><else_stmt><block_start>predictions=model.predict(features_test)<block_end># Compute metrics. mse=mean_squared_error(predictions labels_test)<line_sep>print(json.dumps({'mse':mse }))<line_sep>
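# An optional extension sketch (an assumption, not part of the original benchmark):
# RMSE is often reported next to MSE because it lives in the target's own units.
rmse=np.sqrt(mse)<line_sep>print(json.dumps({'rmse':rmse}))<line_sep>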
<import_stmt>time<import_stmt>hashlib<import_from_stmt>flask request<import_from_stmt>.wechat WeChat<import_from_stmt>StockAnalysisSystem.core.config Config<line_sep>wechat:WeChat=<none><line_sep>SasUserWxUserDict={}<line_sep>WxUserSasUserDict={}<line_sep># ---------------------------------------------------------------------------------------------------------------------- <def_stmt>handle_cmd_test parameters:str flask_request:request msg_dict:dict<arrow>str<block_start>wechat_user=msg_dict.get('FromUserName' '')<if_stmt>wechat_user<not><in>WxUserSasUserDict.keys()<block_start><return>''<block_end>user_mgr=wechat.get_user_manager()<line_sep>user_lst=user_mgr.get_user_list()<if_stmt>len(user_lst)<g>0<block_start>wechat.send_user_message(user_lst[0] 'Hello from Sleepy')<block_end><return>'Test Execute Done'<block_end><def_stmt>handle_cmd_login parameters:str flask_request:request msg_dict:dict<arrow>str<block_start>parts=parameters.split(',')<line_sep>username=(parts[0]<if>len(parts)<g>0<else>'').strip()<line_sep>password=(parts[1]<if>len(parts)<g>1<else>'').strip()<line_sep>passwd_sha1=hashlib.sha1(password.encode('utf-8')).hexdigest()<if_stmt>username<eq>'Sleepy'<and>passwd_<l>PASSWORD<g>1<eq>'<PASSWORD>'<block_start>wechat_user=msg_dict.get('FromUserName' '')<if_stmt>wechat_user<ne>''<block_start>SasUserWxUserDict[username]=wechat_user<line_sep>WxUserSasUserDict[wechat_user]=username<line_sep>wechat.get_user_manager().update_user_session(wechat_user 'login' time.time())<line_sep><return>'Login Successful'<block_end><block_end><return>''<block_end><def_stmt>handle_cmd_logoff parameters:str flask_request:request msg_dict:dict<arrow>str<block_start>username=parameters.strip()<if_stmt>username<ne>''<and>username<in>SasUserWxUserDict.keys()<block_start>wechat_user=SasUserWxUserDict[username]<line_sep>wechat.get_user_manager().update_user_session(wechat_user 'login' 0)<del_stmt>SasUserWxUserDict[username]<del_stmt>WxUserSasUserDict[wechat_user]<block_end><block_end># ---------------------------------------------------------------------------------------------------------------------- <def_stmt>parse_command text:str<arrow>(str str)<block_start>parts=text.split(':')<line_sep>command=(parts[0]<if>len(parts)<g>0<else>'').strip()<line_sep>parameters=(parts[1]<if>len(parts)<g>1<else>'').strip()<line_sep><return>command parameters<block_end><def_stmt>handle_command flask_request:request msg_dict:dict<arrow>(bool str)<block_start>content:str=msg_dict.get('Content' '')<line_sep>command,parameters=parse_command(content)<if_stmt>command<eq>'test'<block_start><return><true> handle_cmd_test(parameters flask_request msg_dict)<block_end><if_stmt>command<eq>'login'<block_start><return><true> handle_cmd_login(parameters flask_request msg_dict)<block_end><if_stmt>command<eq>'logoff'<block_start><return><true> handle_cmd_logoff(parameters flask_request msg_dict)<block_end><return><false> ''<block_end><def_stmt>handle_analysis flask_request:request msg_dict:dict<arrow>(bool str)<block_start>content=msg_dict.get('Content' '')<line_sep><return><true> ('<a href="http://172.16.58.3/analysis?security=%s">查看分析结果</a>'%content)<block_end><def_stmt>handle_text_message flask_request:request msg_dict:dict<arrow>str<block_start>ret,resp=handle_command(flask_request msg_dict)<if_stmt>ret<block_start><return>resp<block_end>ret,resp=handle_analysis(flask_request msg_dict)<if_stmt>ret<block_start><return>resp<block_end><return>''<block_end># 
---------------------------------------------------------------------------------------------------------------------- <def_stmt>handle_request flask_request:request<arrow>str<block_start><global>wechat<line_sep><return>wechat.handle_request(flask_request)<block_end># ---------------------------------------------------------------------------------------------------------------------- <def_stmt>load_config config:Config<block_start>wechat_token=config.get('wechat_token' '')<line_sep>wechat_app_id=config.get('wechat_app_id' '')<line_sep>wechat_app_secret=config.get('wechat_app_secret' '')<line_sep>print('Load config - WeChat Token: %s'%wechat_token)<line_sep>print('Load config - WeChat App ID: %s'%wechat_app_id)<line_sep>print('Load config - WeChat App Secret: %s'%wechat_app_secret)<line_sep><global>wechat<line_sep>wechat.set_token(wechat_token)<line_sep>wechat.set_app_id(wechat_app_id)<line_sep>wechat.set_app_secret(wechat_app_secret)<block_end><def_stmt>init config:Config<block_start><global>wechat<line_sep>wechat=WeChat()<line_sep>load_config(config)<line_sep>wechat.set_msg_handler('text' handle_text_message)<block_end>
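# ----------------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (an assumption, not part of the original module): exposing
# handle_request() through a Flask route after init(). The '/wechat' path, the
# port, and constructing Config() with no arguments are all hypothetical.
<if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>flask Flask<line_sep>app=Flask(__name__)<line_sep>init(Config())<line_sep>@app.route('/wechat' methods=['GET' 'POST'])<def_stmt>wechat_entry <arrow>str<block_start><return>handle_request(request)<block_end>app.run(host='0.0.0.0' port=8000)<block_end>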
# Copyright 2015 Ufora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>imp<import_stmt>importlib<import_stmt>os<import_stmt>sys<import_from_stmt>pyfora.PureImplementationMapping PureMappingRegistry<def_stmt>typeOfInstance i<block_start><try_stmt><block_start><return>i.__class__<block_end><except_stmt>AttributeError<block_start><return>type(i)<block_end><block_end><class_stmt>PureImplementationMappings(object)<block_start>"""Collection of PureImplementationMapping objects"""<def_stmt>__init__ self<block_start>self.last_seen_sys_modules_len=0<line_sep>self.already_loaded=set()<line_sep>self.mappings=[]<line_sep>self.pythonTypeToMapping={}<line_sep>self.pyforaTypeToMapping={}<line_sep>self.pythonInstanceIdsToMappingAndId={}<line_sep>self.opacity_by_module_name={}<block_end><def_stmt>isOpaqueModule self module<block_start>"""Is this module a system module none of whose pieces has a valid mapping? If so, then we can treat the entire module as unmappable and we don't need to recurse into its interior when mapping its values. """<line_sep>name=module.__name__<if_stmt>name<in>self.opacity_by_module_name<block_start><return>self.opacity_by_module_name[name]<block_end>self.opacity_by_module_name[name]=self._isOpaqueModuleUncached(module)<line_sep><return>self.opacity_by_module_name[name]<block_end><def_stmt>_isOpaqueModuleUncached self module<block_start>self.load_pure_modules()<if_stmt>module.__name__<in>self.already_loaded#this module has pure elements. <block_start><return><false><block_end><if_stmt>"."<in>module.__name__<and>module.__name__.split(".")[0]<in>self.already_loaded#this module has pure elements. 
<block_start><return><false><block_end><if_stmt><not>hasattr(module '__file__')#this is a builtin module, like 'sys' <block_start><return><true><block_end><if_stmt><not>module.__file__.startswith(sys.prefix)#this is user code <block_start><return><false><block_end><return><true><block_end><def_stmt>addMapping self mapping<block_start>self.mappings.append(mapping)<for_stmt>mappableType mapping.getMappablePythonTypes()<block_start>self.pythonTypeToMapping[mappableType]=mapping<block_end><for_stmt>purePythonType mapping.getPurePythonTypes()<block_start>self.pyforaTypeToMapping[purePythonType]=mapping<block_end><for_stmt>instance mapping.getMappableInstances()<block_start>self.pythonInstanceIdsToMappingAndId[id(instance)]=(mapping instance)<block_end><block_end><def_stmt>canMap self instance<block_start>self.load_pure_modules()<line_sep><return>(typeOfInstance(instance)<in>self.pythonTypeToMapping<or>id(instance)<in>self.pythonInstanceIdsToMappingAndId)<block_end><def_stmt>canInvert self instance<block_start><return>typeOfInstance(instance)<in>self.pyforaTypeToMapping<block_end><def_stmt>canInvertInstancesOf self classMapping<block_start><return>classMapping<in>self.pyforaTypeToMapping<block_end><def_stmt>mappableInstanceToPure self instance<block_start><if_stmt>id(instance)<in>self.pythonInstanceIdsToMappingAndId<block_start>mapper=self.pythonInstanceIdsToMappingAndId[id(instance)][0]<block_end><else_stmt><block_start>mapper=self.pythonTypeToMapping[typeOfInstance(instance)]<block_end><return>mapper.mapPythonInstanceToPyforaInstance(instance)<block_end><def_stmt>pureInstanceToMappable self instance<block_start>mapper=self.pyforaTypeToMapping[typeOfInstance(instance)]<line_sep><return>mapper.mapPyforaInstanceToPythonInstance(instance)<block_end><def_stmt>load_pure_modules self<block_start><if_stmt>len(sys.modules)<le>self.last_seen_sys_modules_len<block_start><return><block_end>loaded_modules=sys.modules.keys()<line_sep>loaded_root_modules=set(m.split('.')[0]<for>m loaded_modules)<for_stmt>root loaded_root_modules<block_start><if_stmt>root<in>self.already_loaded<or>root<eq>'pyfora'<block_start><continue><block_end>self.try_load_pure_module(root)<block_end>self.last_seen_sys_modules_len=len(sys.modules)<block_end><def_stmt>addMappingsForModule self module_name<block_start><for_stmt>mapping PureMappingRegistry.mappingsForRootModule(module_name)<block_start>self.addMapping(mapping)<block_end>self.already_loaded.add(module_name)<block_end><def_stmt>try_load_pure_module self module_name<block_start><try_stmt># first try to load a pyfora pure module, if one exists <block_start>importlib.import_module("pyfora.pure_modules.pure_"+module_name)<line_sep>self.addMappingsForModule(module_name)<block_end><except_stmt>ImportError<block_start><pass><block_end>pyfora_path=os.getenv('PYFORAPATH')<if_stmt>pyfora_path<is><none><block_start><return><block_end><for_stmt>mod [module_name "pure_"+module_name]<block_start>path=os.path.join(pyfora_path mod)<if_stmt>os.path.exists(path)<or>os.path.exists(path+'.py')<block_start><try_stmt>#imp.find_module expects a list of directories, not a single path string <block_start>load_args=imp.find_module(mod [pyfora_path])<line_sep>imp.load_module("pyfora.user_pure_modules.pure_"+mod *load_args)<line_sep>self.addMappingsForModule(module_name)<block_end><except_stmt>ImportError<block_start><pass><block_end><block_end><block_end><block_end><block_end>
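# Hedged usage sketch (an assumption, not part of the original module): the
# registry is typically consulted before converting a value for pyfora, and the
# two conversion directions round-trip through the registered mappings.
#
# mappings = PureImplementationMappings()
# if mappings.canMap(obj):
#     pure = mappings.mappableInstanceToPure(obj)
#     obj_again = mappings.pureInstanceToMappable(pure)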
<def_stmt>config_record_on_account request account_id<block_start><pass><block_end><def_stmt>config_record_on_channel request channel_id<block_start><pass><block_end>
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>argparse<import_from_stmt>src.mesh_file_manager MeshStructureFileManager<import_from_stmt>src.mesh_parser get_parser<import_from_stmt>src.mesh_tree_uploader get_uploader<import_from_stmt>src.ontology_type OntologyType<def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--url" type=str required=<true>)<line_sep>parser.add_argument("--type" type=str required=<true>)<line_sep>parser.add_argument("--tmp_path" type=str required=<false>)<line_sep>args=parser.parse_args()<line_sep>url_path=args.url<line_sep>tmp_path=args.tmp_path<line_sep>ontology_type=args.type<if_stmt>ontology_type<not><in>OntologyType.get_allowed()<block_start><raise>RuntimeError("Unsupported ontology type '%s'. Allowed types: %s"%(ontology_type ", ".join(OntologyType.get_allowed())))<block_end>file_manager=MeshStructureFileManager(tmp_path ontology_type)<line_sep># Clean up the temporary file in all cases, success or failure. <try_stmt><block_start>path=file_manager.download(url_path)<line_sep>print("Mesh structure successfully downloaded to path '%s'"%path)<line_sep>tree,root_id=get_parser(ontology_type).parse(path)<line_sep>print("Mesh structure successfully parsed. Found '%d' records"%len(tree.nodes))<line_sep>get_uploader(ontology_type tree root_id).upload_tree()<line_sep>print("Mesh structure successfully uploaded!")<block_end><finally_stmt><block_start>file_manager.delete()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
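# Hedged invocation sketch (an assumption, not part of the original file): the
# script file name, URL and ontology type below are hypothetical; valid types
# come from OntologyType.get_allowed().
#
# python main.py --url https://example.org/mesh/desc2020.xml --type MESH --tmp_path /tmp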
<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_stmt>torch<import_from_stmt>torchvision models transforms<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>inspect<import_from_stmt>numpy.fft fft2 ifft2 fftshift ifftshift<import_stmt>math<import_from_stmt>.utils abs real imag downsample<def_stmt>lowpassfilter size cutoff n<block_start>""" Constructs a low-pass Butterworth filter: f = 1 / (1 + (w/cutoff)^2n) usage: f = lowpassfilter(sze, cutoff, n) where: size is a tuple specifying the size of filter to construct [rows cols]. cutoff is the cutoff frequency of the filter 0 - 0.5 n is the order of the filter, the higher n is the sharper the transition is. (n must be an integer >= 1). Note that n is doubled so that it is always an even integer. The frequency origin of the returned filter is at the corners. """<if_stmt>cutoff<l>0.<or>cutoff<g>0.5<block_start><raise>Exception('cutoff must be between 0 and 0.5')<block_end><elif_stmt>n%1<block_start><raise>Exception('n must be an integer >= 1')<block_end><if_stmt>len(size)<eq>1<block_start>rows=cols=size[0]<block_end><else_stmt><block_start>rows,cols=size<block_end><if_stmt>(cols%2)<block_start>xvals=np.arange(-(cols-1)/2. ((cols-1)/2.)+1)/float(cols-1)<block_end><else_stmt><block_start>xvals=np.arange(-cols/2. cols/2.)/float(cols)<block_end><if_stmt>(rows%2)<block_start>yvals=np.arange(-(rows-1)/2. ((rows-1)/2.)+1)/float(rows-1)<block_end><else_stmt><block_start>yvals=np.arange(-rows/2. rows/2.)/float(rows)<block_end>x,y=np.meshgrid(xvals yvals sparse=<true>)<line_sep>radius=np.sqrt(x<times>x+y<times>y)<line_sep><return>ifftshift(1./(1.+(radius/cutoff)<power>(2.<times>n)))<block_end><def_stmt>filtergrid rows cols# Set up u1 and u2 matrices with ranges normalised to +/- 0.5 <block_start>u1,u2=np.meshgrid(np.linspace(-0.5 0.5 cols endpoint=(cols%2)) np.linspace(-0.5 0.5 rows endpoint=(rows%2)) sparse=<true>)<line_sep># Quadrant shift to put 0 frequency at the top left corner u1=ifftshift(u1)<line_sep>u2=ifftshift(u2)<line_sep># Compute frequency values as a radius from centre (but quadrant shifted) radius=np.sqrt(u1<times>u1+u2<times>u2)<line_sep><return>radius u1 u2<block_end><def_stmt>phasecong2 im<block_start>nscale=4<line_sep>norient=4<line_sep>minWaveLength=6<line_sep>mult=2<line_sep>sigmaOnf=0.55<line_sep>dThetaOnSigma=1.2<line_sep>k=2.0<line_sep>epsilon=.0001<line_sep>thetaSigma=np.pi/norient/dThetaOnSigma<line_sep>_,_,rows,cols=im.shape<line_sep>imagefft=torch.rfft(im 2 onesided=<false>)<line_sep>lp=lowpassfilter((rows cols) .45 15)<line_sep>radius,_,_=filtergrid(rows cols)<line_sep>radius[0 0]=1.<line_sep>logGaborList=[]<line_sep>logGaborDenom=2.<times>np.log(sigmaOnf)<power>2.<for_stmt>s range(nscale)<block_start>wavelength=minWaveLength<times>mult<power>s<line_sep>fo=1./wavelength# Centre frequency of filter logRadOverFo=(np.log(radius/fo))<line_sep>logGabor=np.exp(-(logRadOverFo<times>logRadOverFo)/logGaborDenom)<line_sep>logGabor<augmul>lp# Apply the low-pass filter logGabor[0 0]=0.# Undo the radius fudge logGaborList.append(logGabor)<block_end># Matrix of radii cy=np.floor(rows/2)<line_sep>cx=np.floor(cols/2)<line_sep>y,x=np.mgrid[0:rows 0:cols]<line_sep>y=(y-cy)/rows<line_sep>x=(x-cx)/cols<line_sep>radius=np.sqrt(x<power>2+y<power>2)<line_sep>theta=np.arctan2(-y x)<line_sep>radius=ifftshift(radius)# Quadrant shift radius and theta so that filters theta=ifftshift(theta)# are constructed with 0 frequency at the corners. 
radius[0 0]=1<line_sep>sintheta=np.sin(theta)<line_sep>costheta=np.cos(theta)<line_sep>spreadList=[]<for_stmt>o np.arange(norient)<block_start>angl=o<times>np.pi/norient# Filter angle. ds=sintheta<times>math.cos(angl)-costheta<times>math.sin(angl)# Difference in sine. dc=costheta<times>math.cos(angl)+sintheta<times>math.sin(angl)# Difference in cosine. dtheta=np.abs(np.arctan2(ds dc))# Absolute angular distance. # dtheta = np.minimum(dtheta*NumberAngles/2, math.pi) spread=np.exp((-dtheta<power>2)/(2<times>thetaSigma<power>2))<line_sep># Calculate the angular filter component. spreadList.append(spread)<block_end>ifftFilterArray=[[] [] [] []]<line_sep>filterArray=[[] [] [] []]<for_stmt>o np.arange(norient)<block_start><for_stmt>s np.arange(nscale)<block_start>filter=logGaborList[s]<times>spreadList[o]<line_sep>filterArray[o].append(torch.from_numpy(filter).reshape(1 1 rows cols).float().to(im.device))<line_sep>ifftFilt=np.real(ifft2(filter))<times>math.sqrt(rows<times>cols)<line_sep>ifftFilterArray[o].append(torch.from_numpy(ifftFilt).reshape(1 1 rows cols).float().to(im.device))<block_end><block_end>EnergyAll=0<line_sep>AnAll=0<for_stmt>o np.arange(norient)<block_start>sumE_ThisOrient=0<line_sep>sumO_ThisOrient=0<line_sep>sumAn_ThisOrient=0<line_sep>Energy=0<line_sep>MatrixEOList=[]<for_stmt>s np.arange(nscale)<block_start>filter=filterArray[o][s]<line_sep>MatrixEO=torch.ifft(imagefft<times>filter.unsqueeze(-1).repeat(1 1 1 1 2) 2)<line_sep>MatrixEOList.append(MatrixEO)<line_sep>An=abs(MatrixEO)# Amplitude of even & odd filter response. sumAn_ThisOrient=sumAn_ThisOrient+An# Sum of amplitude responses. sumE_ThisOrient=sumE_ThisOrient+real(MatrixEO)# Sum of even filter convolution results. sumO_ThisOrient=sumO_ThisOrient+imag(MatrixEO)# Sum of odd filter convolution results. 
<if_stmt>s<eq>0<block_start>EM_n=torch.sum(filter<power>2 dim=[1 2 3])<line_sep>maxAn=An<block_end><else_stmt><block_start>maxAn=torch.max(maxAn An)<block_end><block_end>XEnergy=torch.sqrt(sumE_ThisOrient<power>2+sumO_ThisOrient<power>2+1e-12)+epsilon<line_sep>MeanE=sumE_ThisOrient/XEnergy<line_sep>MeanO=sumO_ThisOrient/XEnergy<for_stmt>s np.arange(nscale)<block_start>EO=MatrixEOList[s]<line_sep>E=real(EO)<line_sep>O=imag(EO)<line_sep>Energy=Energy+E<times>MeanE+O<times>MeanO-torch.abs(E<times>MeanO-O<times>MeanE)<block_end>meanE2n=torch.median((abs(MatrixEOList[0])<power>2).view(im.shape[0] -1) dim=1)[0]/-math.log(0.5)<line_sep>noisePower=meanE2n/EM_n<line_sep>EstSumAn2=0<for_stmt>s np.arange(nscale)<block_start>EstSumAn2=EstSumAn2+ifftFilterArray[o][s]<power>2<block_end>EstSumAiAj=0<for_stmt>si np.arange(nscale-1)<block_start><for_stmt>sj np.arange(si+1 nscale)<block_start>EstSumAiAj=EstSumAiAj+ifftFilterArray[o][si]<times>ifftFilterArray[o][sj]<block_end><block_end>sumEstSumAn2=torch.sum(EstSumAn2 dim=[1 2 3])<line_sep>sumEstSumAiAj=torch.sum(EstSumAiAj dim=[1 2 3])<line_sep>EstNoiseEnergy2=2<times>noisePower<times>sumEstSumAn2+4<times>noisePower<times>sumEstSumAiAj<line_sep>tau=torch.sqrt(EstNoiseEnergy2/2+1e-12)<line_sep>EstNoiseEnergySigma=torch.sqrt((2-math.pi/2)<times>tau<power>2+1e-12)<line_sep>T=tau<times>math.sqrt(math.pi/2)+k<times>EstNoiseEnergySigma<line_sep>T=T/1.7<line_sep>Energy=F.relu(Energy-T.view(-1 1 1 1))<line_sep>EnergyAll=EnergyAll+Energy<line_sep>AnAll=AnAll+sumAn_ThisOrient<block_end>ResultPC=EnergyAll/AnAll<line_sep><return>ResultPC<block_end><def_stmt>fsim imageRef imageDis<block_start>channels=imageRef.shape[1]<if_stmt>channels<eq>3<block_start>Y1=(0.299<times>imageRef[: 0 : :]+0.587<times>imageRef[: 1 : :]+0.114<times>imageRef[: 2 : :]).unsqueeze(1)<line_sep>Y2=(0.299<times>imageDis[: 0 : :]+0.587<times>imageDis[: 1 : :]+0.114<times>imageDis[: 2 : :]).unsqueeze(1)<line_sep>I1=(0.596<times>imageRef[: 0 : :]-0.274<times>imageRef[: 1 : :]-0.322<times>imageRef[: 2 : :]).unsqueeze(1)<line_sep>I2=(0.596<times>imageDis[: 0 : :]-0.274<times>imageDis[: 1 : :]-0.322<times>imageDis[: 2 : :]).unsqueeze(1)<line_sep>Q1=(0.211<times>imageRef[: 0 : :]-0.523<times>imageRef[: 1 : :]+0.312<times>imageRef[: 2 : :]).unsqueeze(1)<line_sep>Q2=(0.211<times>imageDis[: 0 : :]-0.523<times>imageDis[: 1 : :]+0.312<times>imageDis[: 2 : :]).unsqueeze(1)<line_sep>Y1,Y2=downsample(Y1 Y2)<line_sep>I1,I2=downsample(I1 I2)<line_sep>Q1,Q2=downsample(Q1 Q2)<block_end><elif_stmt>channels<eq>1<block_start>Y1,Y2=downsample(imageRef imageDis)<block_end><else_stmt><block_start><raise>ValueError('channels error')<block_end>PC1=phasecong2(Y1)<line_sep>PC2=phasecong2(Y2)<line_sep>dx=torch.Tensor([[3 0 -3] [10 0 -10] [3 0 -3]]).float()/16<line_sep>dy=torch.Tensor([[3 10 3] [0 0 0] [-3 -10 -3]]).float()/16<line_sep>dx=dx.reshape(1 1 3 3).to(imageRef.device)<line_sep>dy=dy.reshape(1 1 3 3).to(imageRef.device)<line_sep>IxY1=F.conv2d(Y1 dx stride=1 padding=1)<line_sep>IyY1=F.conv2d(Y1 dy stride=1 padding=1)<line_sep>gradientMap1=torch.sqrt(IxY1<power>2+IyY1<power>2+1e-12)<line_sep>IxY2=F.conv2d(Y2 dx stride=1 padding=1)<line_sep>IyY2=F.conv2d(Y2 dy stride=1 padding=1)<line_sep>gradientMap2=torch.sqrt(IxY2<power>2+IyY2<power>2+1e-12)<line_sep>T1=0.85<line_sep>T2=160<line_sep>PCSimMatrix=(2<times>PC1<times>PC2+T1)/(PC1<power>2+PC2<power>2+T1)<line_sep>gradientSimMatrix=(2<times>gradientMap1<times>gradientMap2+T2)/(gradientMap1<power>2+gradientMap2<power>2+T2)<line_sep>PCm=torch.max(PC1 
PC2)<line_sep>SimMatrix=gradientSimMatrix<times>PCSimMatrix<times>PCm<line_sep>FSIM_val=torch.sum(SimMatrix dim=[1 2 3])/torch.sum(PCm dim=[1 2 3])<if_stmt>channels<eq>1<block_start><return>FSIM_val<block_end>T3=200<line_sep>T4=200<line_sep>ISimMatrix=(2<times>I1<times>I2+T3)/(I1<power>2+I2<power>2+T3)<line_sep>QSimMatrix=(2<times>Q1<times>Q2+T4)/(Q1<power>2+Q2<power>2+T4)<line_sep>SimMatrixC=gradientSimMatrix<times>PCSimMatrix<times>PCm<times>torch.sign(gradientSimMatrix)<times>((torch.abs(ISimMatrix<times>QSimMatrix)+1e-12)<power>0.03)<line_sep><return>torch.sum(SimMatrixC dim=[1 2 3])/torch.sum(PCm dim=[1 2 3])<block_end><class_stmt>FSIM(torch.nn.Module)# Refer to https://sse.tongji.edu.cn/linzhang/IQA/FSIM/FSIM.htm <block_start><def_stmt>__init__ self channels=3<block_start>super(FSIM self).__init__()<block_end><def_stmt>forward self y x as_loss=<true><block_start><assert_stmt>x.shape<eq>y.shape<line_sep>x=x<times>255<line_sep>y=y<times>255<if_stmt>as_loss<block_start>score=fsim(x y)<line_sep><return>1-score.mean()<block_end><else_stmt><block_start><with_stmt>torch.no_grad()<block_start>score=fsim(x y)<block_end><return>score<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>PIL Image<import_stmt>argparse<import_from_stmt>utils prepare_image<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--ref' type=str default='images/r0.png')<line_sep>parser.add_argument('--dist' type=str default='images/r1.png')<line_sep>args=parser.parse_args()<line_sep>device=torch.device('cuda'<if>torch.cuda.is_available()<else>'cpu')<line_sep>ref=prepare_image(Image.open(args.ref).convert("RGB") repeatNum=1).to(device)<line_sep>dist=prepare_image(Image.open(args.dist).convert("RGB") repeatNum=1).to(device)<line_sep>model=FSIM(channels=3).to(device)<line_sep>score=model(dist ref as_loss=<false>)<line_sep>print('score: %.4f'%score.item())<line_sep># score: 0.7843 <block_end>
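# Hedged training-loop sketch (an assumption, not part of the original file):
# with as_loss=True (the default) the module returns 1 - mean(FSIM), so it can
# be minimized directly. 'restored' and 'reference' are hypothetical float
# tensors in [0, 1] with shape (N, 3, H, W), 'restored' requiring gradients.
#
# criterion = FSIM(channels=3).to(device)
# loss = criterion(reference, restored)   # forward(y, x) with as_loss=True
# loss.backward()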
<import_stmt>librosa<import_stmt>madmom<import_from_stmt>madmom.features.beats *<import_from_stmt>scipy signal<import_stmt>numpy<as>np<def_stmt>peak_picking beat_times total_samples kernel_size offset# smoothing the beat function <block_start>cut_off_norm=len(beat_times)/total_samples<times>100/2<line_sep>b,a=signal.butter(1 cut_off_norm)<line_sep>beat_times=signal.filtfilt(b a beat_times)<line_sep># creating a list of samples for the rnn beats beat_samples=np.linspace(0 total_samples len(beat_times) endpoint=<true> dtype=int)<line_sep>n_t_medians=signal.medfilt(beat_times kernel_size=kernel_size)<line_sep>peaks=[]<for_stmt>i range(len(beat_times)-1)<block_start><if_stmt>beat_times[i]<g>0<block_start><if_stmt>beat_times[i]<g>beat_times[i-1]<block_start><if_stmt>beat_times[i]<g>beat_times[i+1]<block_start><if_stmt>beat_times[i]<g>(n_t_medians[i]+offset)<block_start>peaks.append(int(beat_samples[i]))<block_end><block_end><block_end><block_end><block_end><return>peaks<block_end><def_stmt>analyze y sr<block_start>data={}<line_sep># sample rate data['sample_rate']=sr<line_sep># getting duration in seconds data['duration']=librosa.get_duration(y=y sr=sr)<line_sep># beats prediction # rnn_processor = RNNBeatProcessor() # beats = rnn_processor(y) rnn_processor=RNNBeatProcessor(post_processor=<none>)<line_sep>predictions=rnn_processor(y)<line_sep>mm_processor=MultiModelSelectionProcessor(num_ref_predictions=<none>)<line_sep>beats=mm_processor(predictions)<line_sep>data['beat_samples']=peak_picking(beats len(y) 5 0.01)<if_stmt>len(data['beat_samples'])<l>3<block_start>data['beat_samples']=peak_picking(beats len(y) 25 0.01)<block_end><if_stmt>data['beat_samples']<eq>[]<block_start>data['beat_samples']=[0]<block_end>data['number_of_beats']=len(data['beat_samples'])<line_sep># tempo data['tempo_float']=(len(data['beat_samples'])-1)<times>60/data['duration']<line_sep>data['tempo_int']=int(data['tempo_float'])<line_sep># noisiness featues data['zero_crossing']=librosa.feature.zero_crossing_rate(y)[0].tolist()<line_sep>data['noisiness_median']=float(np.median(data['zero_crossing']))<line_sep>data['noisiness_sum']=sum(librosa.zero_crossings(y)/y.shape[0])<line_sep># spectral features notes=[]<try_stmt><block_start>chroma=librosa.feature.chroma_cqt(y n_chroma=12 bins_per_octave=12 n_octaves=8 hop_length=512)<line_sep># CONVERSION TABLE # 0 c 261.63 # 1 c# 277.18 # 2 d 293.66 # 3 d# 311.13 # 4 e 329.63 # 5 f 349.23 # 6 f# 369.99 # 7 g 392.00 # 8 g# 415.30 # 9 a 440.00 # 10 a# 466.16 # 11 b 493.88 <for_stmt>col range(chroma.shape[1])<block_start>notes.append(int(np.argmax(chroma[: col])))<block_end>data['notes']=notes<line_sep>data['dominant_note']=int(np.argmax(np.bincount(np.array(notes))))<block_end><except_stmt>Exception<block_start>data['notes']=[0]<line_sep>data['dominant_note']=0<block_end><return>data<block_end>
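# Hedged usage sketch (an assumption, not part of the original module); the
# audio file name and sample rate below are hypothetical:
#
# y, sr = librosa.load('track.wav', sr=44100)
# info = analyze(y, sr)
# print(info['tempo_int'], info['number_of_beats'], info['dominant_note'])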
## # Copyright (c) 2010-2017 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## """ Benchmark a server's handling of event summary changes. """<import_from_stmt>itertools count<import_from_stmt>urllib2 HTTPDigestAuthHandler<import_from_stmt>twisted.internet reactor<import_from_stmt>twisted.internet.defer inlineCallbacks returnValue<import_from_stmt>twisted.web.client Agent<import_from_stmt>twisted.web.http_headers Headers<import_from_stmt>twisted.web.http NO_CONTENT<import_from_stmt>httpauth AuthHandlerAgent<import_from_stmt>httpclient StringProducer<import_from_stmt>benchlib initialize sample<import_from_stmt>_event_create makeEvent<line_sep>@inlineCallbacks<def_stmt>measure host port dtrace attendeeCount samples fieldName replacer eventPerSample=<false><block_start>user=password="<PASSWORD>"<line_sep>root="/"<line_sep>principal="/"<line_sep>calendar="event-%s-benchmark"%(fieldName )<line_sep>authinfo=HTTPDigestAuthHandler()<line_sep>authinfo.add_password(realm="Test Realm" uri="http://%s:%d/"%(host port) user=user passwd=password)<line_sep>agent=AuthHandlerAgent(Agent(reactor) authinfo)<line_sep># Set up the calendar first <yield>initialize(agent host port user password root principal calendar)<if_stmt>eventPerSample# Create an event for each sample that will be taken, so that no event # is used for two different samples. <block_start>f=_selfish_sample<block_end><else_stmt># Just create one event and re-use it for all samples. <block_start>f=_generous_sample<block_end>data=<yield>f(dtrace replacer agent host port user calendar fieldName attendeeCount samples)<line_sep>returnValue(data)<block_end>@inlineCallbacks<def_stmt>_selfish_sample dtrace replacer agent host port user calendar fieldName attendeeCount samples<block_start>url='http://%s:%s/calendars/__uids__/%s/%s/%s-change-%%d.ics'%(host port user calendar fieldName)<line_sep>headers=Headers({"content-type":["text/calendar"]})<line_sep>events=[# The organizerSequence here (1) may need to be a parameter. # See also the makeEvent call below. (makeEvent(i 1 attendeeCount) url%(i ))<for>i range(samples)]<for_stmt>(event url) events<block_start><yield>agent.request('PUT' url headers StringProducer(event))<block_end># Sample changing the event according to the replacer. samples=<yield>sample(dtrace samples agent (('PUT' url headers StringProducer(replacer(event i)))<for>i,(event url) enumerate(events)).next NO_CONTENT)<line_sep>returnValue(samples)<block_end>@inlineCallbacks<def_stmt>_generous_sample dtrace replacer agent host port user calendar fieldName attendeeCount samples<block_start>url='http://%s:%s/calendars/__uids__/%s/%s/%s-change.ics'%(host port user calendar fieldName)<line_sep>headers=Headers({"content-type":["text/calendar"]})<line_sep># See the makeEvent call above. event=makeEvent(0 1 attendeeCount)<line_sep><yield>agent.request('PUT' url headers StringProducer(event))<line_sep># Sample changing the event according to the replacer. 
samples=<yield>sample(dtrace samples agent (('PUT' url headers StringProducer(replacer(event i)))<for>i count(1)).next NO_CONTENT)<line_sep>returnValue(samples)<block_end>
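# Hedged invocation sketch (an assumption, not part of the original file): the
# benchmark is normally driven by a benchlib-style runner inside a Twisted
# reactor. A replacer takes the event text and the sample index and returns the
# modified event body; the SUMMARY rewrite below is hypothetical.
#
# def replaceSummary(event, i):
#     return event.replace("SUMMARY:", "SUMMARY: change-%d " % i)
#
# d = measure("localhost", 8008, dtrace=None, attendeeCount=5, samples=100,
#             fieldName="summary", replacer=replaceSummary)  # returns a Deferred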
# -*- coding: utf-8 -*- """ Name : c11_18_sort_pandas.py Book : Python for Finance (2nd ed.) Publisher: Packt Publishing Ltd. Author : <NAME> Date : 6/6/2017 email : <EMAIL> <EMAIL> """<import_stmt>pandas<as>pd<line_sep>a=pd.DataFrame([[9 4] [9 2] [1 -1]] columns=['A' 'B'])<line_sep>print(a)<line_sep># sort by A ascending, then B descending b=a.sort_values(['A' 'B'] ascending=[1 0])<line_sep>print(b)<line_sep># sort by A and B, both ascending c=a.sort_values(['A' 'B'] ascending=[1 1])<line_sep>print(c)<line_sep>
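# a single-column sort works the same way; boolean flags are equivalent # to the 0/1 lists above (a hedged extra example, not in the original) d=a.sort_values('A' ascending=<false>)<line_sep>print(d)<line_sep>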
<import_from_stmt>.bindings _FFI _C get_errors<import_from_stmt>functools wraps<import_from_stmt>copy copy deepcopy<import_from_stmt>binascii hexlify unhexlify# pylint: disable=unused-import # Py2/3 compatibility <try_stmt><block_start><import_from_stmt>builtins int# pylint: disable=redefined-builtin <import_from_stmt>builtins object# pylint: disable=redefined-builtin <block_end><except_stmt>BaseException# pylint: disable=bare-except <block_start>print("Cannot mock for docs")<block_end><try_stmt><block_start><import_from_stmt>future.utils python_2_unicode_compatible<block_end><except_stmt>Exception<as>e# pylint: disable=broad-except # An identity decorator <block_start><def_stmt>python_2_unicode_compatible x<block_start><return>x<block_end><block_end><import_stmt>pytest<def_stmt>force_Bn n<block_start>"""A decorator that coerces the nth input to be a Big Number"""<def_stmt>convert_nth f# pylint: disable=star-args <block_start>@wraps(f)<def_stmt>new_f *args **kwargs<block_start>new_args=args<try_stmt><block_start><if_stmt><not>n<l>len(args)<or>args[n].bn# isinstance(args[n], Bn): <block_start>new_args=args<block_end><block_end><except_stmt>BaseException# if not n < len(args): # new_args = args <block_start><if_stmt>isinstance(args[n] int)<block_start>r=Bn.from_num(args[n])<line_sep>new_args=list(args)<line_sep>new_args[n]=r<line_sep>new_args=tuple(new_args)<block_end><else_stmt><block_start><return>NotImplemented<block_end><block_end><return>f(*new_args **kwargs)<block_end><return>new_f<block_end><return>convert_nth<block_end><def_stmt>_check return_val<block_start>"""Checks the return code of the C calls"""<if_stmt>__debug__<block_start><if_stmt>isinstance(return_val int)<and>return_val<eq>1<block_start><return><block_end><if_stmt>isinstance(return_val bool)<and>return_val<eq><true><block_start><return><block_end><block_end><if_stmt>return_val<eq><true><and>return_val<eq>1<block_start><return><block_end>errs=get_errors()<line_sep><raise>Exception("BN exception: %s"%errs)<block_end><class_stmt>BnCtx(object)<block_start>""" A Bn Context for use by the petlib library """<line_sep>__slots__=['bnctx' '_C']<def_stmt>__init__ self<block_start>self._C=_C<line_sep>self.bnctx=self._C.BN_CTX_new()<line_sep>_check(self.bnctx<ne>_FFI.NULL)<block_end><def_stmt>__del__ self<block_start><if_stmt>self.bnctx<is><not><none><block_start>self._C.BN_CTX_free(self.bnctx)<block_end><block_end><block_end><class_stmt>BnCtxNULL(BnCtx)<block_start>""" A Bn Context for use by the petlib library """<line_sep>__slots__=['bnctx' '_C']<def_stmt>__init__ self<block_start>self._C=_C<line_sep>self.bnctx=_FFI.NULL<block_end><def_stmt>__del__ self<block_start><pass><block_end><block_end><import_stmt>threading<line_sep>_thread_local=threading.local()<def_stmt>get_ctx <block_start><global>_thread_local<try_stmt><block_start><return>_thread_local.ctx<block_end><except_stmt>BaseException<block_start>_thread_local.ctx=BnCtx()<line_sep><return>_thread_local.ctx<block_end><block_end>@python_2_unicode_compatible<class_stmt>Bn(object)<block_start>"""The core Big Number class. It supports all comparisons (<, <=, ==, !=, >=, >), arithmetic operations (+, -, %, /, divmod, pow) and copy operations (copy and deep copy). The right-hand side operand may be a small native python integer (<2^64). 
"""<line_sep>__C=_C<line_sep># We know this class will keep minimal state __slots__=['bn']<line_sep># -- static methods @staticmethod<def_stmt>from_num num<block_start><if_stmt>isinstance(num int)<block_start><return>Bn(num)<block_end><elif_stmt>isinstance(num Bn)<block_start><return>num<block_end><else_stmt># raise TypeError("Cannot coerce %s into a BN." % num) <block_start><return>NotImplemented<block_end><block_end>@staticmethod<def_stmt>from_decimal sdec<block_start>"""Creates a Big Number from a decimal string. Args: sdec (string): numeric string possibly starting with minus. See Also: str() produces a decimal string from a big number. Example: >>> hundred = Bn.from_decimal("100") >>> str(hundred) '100' """<line_sep>ptr=_FFI.new("BIGNUM **")<line_sep>read_bytes=_C.BN_dec2bn(ptr sdec.encode("utf8"))<if_stmt>read_bytes<ne>len(sdec)<block_start><raise>Exception("BN Error")<block_end>ret=Bn()<line_sep>_C.BN_copy(ret.bn ptr[0])<line_sep>_C.BN_clear_free(ptr[0])<line_sep><return>ret<block_end>@staticmethod<def_stmt>from_hex shex<block_start>"""Creates a Big Number from a hexadecimal string. Args: shex (string): hex (0-F) string possibly starting with minus. See Also: hex() produces a hexadecimal representation of a big number. Example: >>> Bn.from_hex("FF") 255 """<line_sep>ptr=_FFI.new("BIGNUM **")<line_sep>read_bytes=_C.BN_hex2bn(ptr shex.encode("utf8"))<if_stmt>read_bytes<ne>len(shex)<block_start><raise>Exception("BN Error")<block_end>ret=Bn()<line_sep>_C.BN_copy(ret.bn ptr[0])<line_sep>_C.BN_clear_free(ptr[0])<line_sep><return>ret<block_end>@staticmethod<def_stmt>from_binary sbin<block_start>"""Creates a Big Number from a byte sequence representing the number in Big-endian 8 byte atoms. Only positive values can be represented as byte sequence, and the library user should store the sign bit separately. Args: sbin (string): a byte sequence. Example: >>> byte_seq = unhexlify(b"010203") >>> Bn.from_binary(byte_seq) 66051 >>> (1 * 256**2) + (2 * 256) + 3 66051 """<line_sep>ret=Bn()<line_sep>_C.BN_bin2bn(sbin len(sbin) ret.bn)<line_sep><return>ret<block_end>@staticmethod<def_stmt>get_prime bits safe=1<block_start>""" Builds a prime Big Number of length bits. Args: bits (int) -- the number of bits. safe (int) -- 1 for a safe prime, otherwise 0. """<line_sep>_check(0<l>bits<l>10000)<line_sep>_check(safe<in>[0 1])<line_sep>ret=Bn()<line_sep>_check(_C.BN_generate_prime_ex(ret.bn bits safe _FFI.NULL _FFI.NULL _FFI.NULL))<line_sep><return>ret<block_end>## -- methods _upper_bound=2<power>(64-1)<def_stmt>__init__ self num=0<block_start>'Allocate a Big Number structure, initialized with a small integer or zero.'<line_sep>self.bn=_C.BN_new()<if_stmt>num<eq>0<block_start><return><block_end><if_stmt>__debug__<block_start>_check(0<le>abs(num)<le>self._upper_bound)<line_sep>_check(isinstance(num int))<block_end># Assign <if_stmt>num<ne>0<block_start>ret=_C.BN_set_word(self.bn abs(num))<if_stmt>__debug__<block_start>_check(ret)<block_end><if_stmt>ret<ne>1<block_start><raise>Exception("Bn Exception.")<block_end><block_end><if_stmt>num<l>0<block_start>self._set_neg(1)<block_end><block_end><def_stmt>_set_neg self sign=1# """Sets the sign to "-" (1) or "+" (0)""" <block_start><if_stmt><not>(sign<eq>0<or>sign<eq>1)<block_start><raise>Exception("Sign has to be 0 or 1.")<block_end>_C.BN_set_negative(self.bn sign)<block_end><def_stmt>copy self<block_start>"""Returns a copy of the Bn object."""<line_sep><return>self.__copy__()<block_end><def_stmt>__copy__ self# 'Copies the big number. 
Support for copy module' <block_start>other=Bn()<line_sep>_C.BN_copy(other.bn self.bn)<line_sep><return>other<block_end><def_stmt>__deepcopy__ self memento# 'Deepcopy is the same as copy' # pylint: disable=unused-argument <block_start><return>self.__copy__()<block_end><def_stmt>__del__ self# 'Deallocate all resources of the big number' <block_start>self.__C.BN_clear_free(self.bn)<block_end><def_stmt>__inner_cmp__ self other# 'Irel comparison function' # if __debug__: # _check( type(other) == Bn ) <block_start><try_stmt><block_start>sig=int(_C.BN_cmp(self.bn other.bn))<line_sep><return>sig<block_end><except_stmt>AttributeError<block_start><return>self.__inner_cmp__(Bn.from_num(other))<block_end><block_end><def_stmt>__lt__ self other<block_start><return>self.__inner_cmp__(other)<l>0<block_end><def_stmt>__le__ self other<block_start><return>self.__inner_cmp__(other)<le>0<block_end><def_stmt>__eq__ self other<block_start><if_stmt>isinstance(other int)<block_start>other=Bn(other)<block_end><if_stmt><not>isinstance(other Bn)<block_start><return><false><block_end><return>self.__inner_cmp__(other)<eq>0<block_end><def_stmt>__ne__ self other<block_start><return>self.__inner_cmp__(other)<ne>0<block_end><def_stmt>__gt__ self other<block_start><return>self.__inner_cmp__(other)<g>0<block_end><def_stmt>__ge__ self other<block_start><return>self.__inner_cmp__(other)<ge>0<block_end><def_stmt>bool self<block_start>'Turn Bn into boolean. False if zero, True otherwise.'<line_sep><return>self.__bool__()<block_end><def_stmt>__bool__ self# 'Turn into boolean' <block_start><return><not>(self<eq>Bn(0))<block_end># Python 2 compatibility <def_stmt>__nonzero__ self<block_start><return>self.__bool__()<block_end># Export in different representations <def_stmt>repr self<block_start>'The representation of the number as a decimal string'<line_sep><return>self.__repr__()<block_end><def_stmt>__repr__ self# 'The representation of the number as a decimal string' <block_start>buf=_C.BN_bn2dec(self.bn)<line_sep>s=bytes(_FFI.string(buf))<line_sep>_C.OPENSSL_free(buf)<line_sep><return>s.decode('utf8')<block_end><def_stmt>int self<block_start>"""A native python integer representation of the Big Number. Synonym for int(bn). """<line_sep><return>self.__int__()<block_end><def_stmt>__int__ self<block_start><return>int(self.__repr__())<block_end><def_stmt>__index__ self<block_start><return>int(self.__repr__())<block_end><def_stmt>hex self<block_start>"""The representation of the string in hexadecimal. Synonym for hex(n)."""<line_sep><return>self.__hex__()<block_end><def_stmt>__hex__ self# """The representation of the string in hexadecimal""" <block_start>buf=_C.BN_bn2hex(self.bn)<line_sep>s=bytes(_FFI.string(buf))<line_sep>_C.OPENSSL_free(buf)<line_sep><return>s.decode("utf8")<block_end><def_stmt>binary self<block_start>"""Returns a byte sequence storing the absolute value of the Big Number in Big-Endian format (with 8 bit atoms). You need to extact the sign separately. Example: >>> bin = Bn(66051).binary() >>> hexlify(bin) == b'010203' True """<if_stmt>self<l>0<block_start><raise>Exception("Cannot represent negative numbers")<block_end>size=_C.bn_num_bytes(self.bn)<line_sep>bin_string=_FFI.new("unsigned char[]" size)<line_sep>l=_C.BN_bn2bin(self.bn bin_string)<assert_stmt>int(l)<eq>size<line_sep><return>bytes(_FFI.buffer(bin_string)[:])<block_end><def_stmt>random self<block_start>"""Returns a cryptographically strong random number 0 <= rnd < self. 
Example: >>> r = Bn(100).random() >>> 0 <= r < 100 True """<line_sep>rnd=Bn()<line_sep>err=_C.BN_rand_range(rnd.bn self.bn)<if_stmt>__debug__<block_start>_check(err)<block_end><return>rnd<block_end># ---------- Arithmetic -------------- <def_stmt>int_neg self<block_start>"""Returns the negative of this number. Synonym with -self. Example: >>> one100 = Bn(100) >>> one100.int_neg() -100 >>> -one100 -100 """<line_sep><return>self.__neg__()<block_end><def_stmt>int_add self other<block_start>"""Returns the sum of this number with another. Synonym for self + other. Example: >>> one100 = Bn(100) >>> two100 = Bn(200) >>> two100.int_add(one100) # Function syntax 300 >>> two100 + one100 # Operator syntax 300 """<line_sep><return>self.__add__(other)<block_end><def_stmt>__radd__ self other<block_start><return>self.__add__(other)<block_end><def_stmt>__add__ self other<block_start><try_stmt><block_start>r=Bn()<line_sep>err=_C.BN_add(r.bn self.bn other.bn)<if_stmt>__debug__<block_start>_check(err)<block_end><return>r<block_end><except_stmt>AttributeError<block_start><return>self.__add__(Bn.from_num(other))<block_end><block_end><def_stmt>int_sub self other<block_start>"""Returns the difference between this number and another. Synonym for self - other. Example: >>> one100 = Bn(100) >>> two100 = Bn(200) >>> two100.int_sub(one100) # Function syntax 100 >>> two100 - one100 # Operator syntax 100 """<line_sep><return>self-other<block_end><def_stmt>__rsub__ self other<block_start><return>Bn(other)-self<block_end><def_stmt>__sub__ self other<block_start><try_stmt><block_start>r=Bn()<line_sep>err=_C.BN_sub(r.bn self.bn other.bn)<if_stmt>__debug__<block_start>_check(err)<block_end><return>r<block_end><except_stmt>AttributeError<block_start><return>self.__sub__(Bn.from_num(other))<block_end><block_end><def_stmt>int_mul self other<block_start>"""Returns the product of this number with another. Synonym for self * other. Example: >>> one100 = Bn(100) >>> two100 = Bn(200) >>> one100.int_mul(two100) # Function syntax 20000 >>> one100 * two100 # Operator syntax 20000 """<line_sep><return>self.__mul__(other)<block_end><def_stmt>__rmul__ self other<block_start><return>self.__mul__(other)<block_end><def_stmt>__mul__ self other<block_start><try_stmt><block_start>r=Bn()<line_sep>local_ctx=get_ctx()<line_sep>err=_C.BN_mul(r.bn self.bn other.bn local_ctx.bnctx)<if_stmt>__debug__<block_start>_check(err)<block_end><return>r<block_end><except_stmt>AttributeError<block_start>other=Bn.from_num(other)<if_stmt>other<is>NotImplemented<block_start><return>NotImplemented<block_end><return>self.__mul__(other)<block_end><block_end># ------------------ Mod arithmetic ------------------------- <def_stmt>mod_add self other m<block_start>""" mod_add(other, m) Returns the sum of self and other modulo m. Example: >>> Bn(10).mod_add(Bn(2), Bn(11)) # Only function notation available 1 """<try_stmt><block_start>r=Bn()<line_sep>local_ctx=get_ctx()<line_sep>err=_C.BN_mod_add(r.bn self.bn other.bn m.bn local_ctx.bnctx)<if_stmt>__debug__<block_start>_check(err)<block_end><return>r<block_end><except_stmt>AttributeError<block_start><return>self.mod_add(Bn.from_num(other) Bn.from_num(m))<block_end><block_end><def_stmt>mod_sub self other m<block_start>""" mod_sub(other, m) Returns the difference of self and other modulo m. 
Example: >>> Bn(10).mod_sub(Bn(2), Bn(11)) # Only function notation available 8 """<try_stmt><block_start>r=Bn()<line_sep>local_ctx=get_ctx()<line_sep>err=_C.BN_mod_sub(r.bn self.bn other.bn m.bn local_ctx.bnctx)<if_stmt>__debug__<block_start>_check(err)<block_end><return>r<block_end><except_stmt>AttributeError<block_start><return>self.mod_sub(Bn.from_num(other) Bn.from_num(m))<block_end><block_end><def_stmt>mod_mul self other m<block_start>""" mod_mul(other, m) Return the product of self and other modulo m. Example: >>> Bn(10).mod_mul(Bn(2), Bn(11)) # Only function notation available 9 """<try_stmt><block_start>r=Bn()<line_sep>local_ctx=get_ctx()<line_sep>err=_C.BN_mod_mul(r.bn self.bn other.bn m.bn local_ctx.bnctx)<if_stmt>__debug__<block_start>_check(err)<block_end><return>r<block_end><except_stmt>AttributeError<block_start><return>self.mod_mul(Bn.from_num(other) Bn.from_num(m))<block_end><block_end><def_stmt>mod_inverse self m<block_start>""" mod_inverse(m) Compute the inverse mod m, such that self * res == 1 mod m. Example: >>> Bn(10).mod_inverse(m = Bn(11)) # Only function notation available 10 >>> Bn(10).mod_mul(Bn(10), m = Bn(11)) == Bn(1) True """<try_stmt><block_start>res=Bn()<line_sep>local_ctx=get_ctx()<line_sep>err=_C.BN_mod_inverse(res.bn self.bn m.bn local_ctx.bnctx)<if_stmt>err<eq>_FFI.NULL<block_start>errs=get_errors()<if_stmt>errs<eq>[50770023]<block_start><raise>Exception("No inverse")<block_end><elif_stmt>errs<eq>[50782316]<block_start><raise>Exception("No inverse")<block_end><else_stmt><block_start><raise>Exception("Unknown error: %s"%errs)<block_end><block_end><return>res<block_end><except_stmt>AttributeError<block_start><return>self.mod_inverse(Bn.from_num(m))<block_end><block_end><def_stmt>mod_pow self other m ctx=<none><block_start>""" Performs the modular exponentiation of self ** other % m. Example: >>> one100 = Bn(100) >>> one100.mod_pow(2, 3) # Modular exponentiation 1 """<line_sep><return>self.__pow__(other m ctx=ctx)<block_end><def_stmt>divmod self other<block_start>"""Returns the integer division and remainder of this number by another. Synonym for (div, mod) = divmod(self, other)"""<line_sep><return>self.__divmod__(other)<block_end><def_stmt>__rdivmod__ self other<block_start><return>Bn(other).__divmod__(self)<block_end><def_stmt>__divmod__ self other<block_start><try_stmt><block_start>dv=Bn()<line_sep>rem=Bn()<line_sep>local_ctx=get_ctx()<line_sep>ret=_C.BN_div(dv.bn rem.bn self.bn other.bn local_ctx.bnctx)<if_stmt>__debug__<block_start>_check(ret)<block_end><return>(dv rem)<block_end><except_stmt>AttributeError<block_start><return>self.__divmod__(Bn.from_num(other))<block_end><block_end><def_stmt>int_div self other<block_start>"""Returns the integer division of this number by another. Synonym of self / other. Example: >>> one100 = Bn(100) >>> two100 = Bn(200) >>> two100.int_div(one100) # Function syntax 2 >>> two100 / one100 # Operator syntax 2 """<line_sep><return>self.__div__(other)<block_end><def_stmt>__rdiv__ self other<block_start><return>Bn(other).__div__(self)<block_end><def_stmt>__div__ self other<block_start>dv,_=divmod(self other)<line_sep><return>dv<block_end><def_stmt>mod self other<block_start>"""Returns the remainder of this number modulo another. Synonym for self % other. 
Example: >>> one100 = Bn(100) >>> two100 = Bn(200) >>> two100.mod(one100) # Function syntax 0 >>> two100 % one100 # Operator syntax 0 """<line_sep><return>self.__mod__(other)<block_end><def_stmt>__rmod__ self other<block_start><return>Bn(other).__mod__(self)<block_end><def_stmt>__mod__ self other<block_start><try_stmt><block_start>rem=Bn()<line_sep>local_ctx=get_ctx()<line_sep>err=_C.BN_nnmod(rem.bn self.bn other.bn local_ctx.bnctx)<if_stmt>__debug__<block_start>_check(err)<block_end><return>rem<block_end><except_stmt>AttributeError<block_start><return>self.__mod__(Bn.from_num(other))<block_end><block_end><def_stmt>__rtruediv__ self other<block_start><return>Bn(other).__truediv__(self)<block_end><def_stmt>__truediv__ self other<block_start><return>self.__div__(other)<block_end><def_stmt>__rfloordiv__ self other<block_start><return>Bn(other).__floordiv__(self)<block_end><def_stmt>__floordiv__ self other<block_start><return>self.__div__(other)<block_end><def_stmt>__rpow__ self other<block_start><return>Bn(other).__pow__(self)<block_end><def_stmt>pow self other modulo=<none> ctx=<none><block_start>"""Returns the number raised to the power other optionally modulo a third number. Synonym with pow(self, other, modulo). Example: >>> one100 = Bn(100) >>> one100.pow(2) # Function syntax 10000 >>> one100 ** 2 # Operator syntax 10000 >>> one100.pow(2, 3) # Modular exponentiation 1 """<if_stmt>modulo<block_start><return>self.__pow__(other modulo ctx)<block_end><else_stmt><block_start><return>self<power>other<block_end><block_end><def_stmt>__pow__ self other modulo=<none> ctx=<none><block_start><try_stmt><block_start>res=Bn()<if_stmt>ctx<is><none><block_start>ctx=BnCtx()<block_end><if_stmt>modulo<is><none><block_start>_check(_C.BN_exp(res.bn self.bn other.bn ctx.bnctx))<block_end><else_stmt><block_start>_check(_C.BN_mod_exp(res.bn self.bn other.bn modulo.bn ctx.bnctx))<block_end><return>res<block_end><except_stmt>BaseException<block_start>other=Bn.from_num(other)<if_stmt>modulo<is><not><none><block_start>modulo=Bn.from_num(modulo)<block_end><return>self.__pow__(other modulo ctx)<block_end><block_end><def_stmt>is_prime self<block_start>"""Returns True if the number is prime, with negligible prob. 
of error."""<line_sep>res=int(_C.BN_is_prime_ex(self.bn 0 get_ctx().bnctx _FFI.NULL))<if_stmt>res<eq>0<block_start><return><false><block_end><if_stmt>res<eq>1<block_start><return><true><block_end><raise>Exception("Primality test failure %s"%int(res))<block_end><def_stmt>is_odd self<block_start>"""Returns True if the number is odd."""<line_sep><return>bool(_C.bn_is_odd(self.bn))<block_end><def_stmt>is_bit_set self n<block_start>"""Returns True if the nth bit is set"""<line_sep><return>int(_C.BN_is_bit_set(self.bn n))<block_end><def_stmt>num_bits self<block_start>"""Returns the number of bits representing this Big Number"""<line_sep><return>int(_C.BN_num_bits(self.bn))<block_end># Implement negative <def_stmt>__neg__ self# pylint: disable=protected-access <block_start>zero=Bn(0)<line_sep>ret=copy(self)<if_stmt>ret<ge>zero<block_start>ret._set_neg(1)<block_end><else_stmt><block_start>ret._set_neg(0)<block_end><return>ret<block_end><def_stmt>__hash__ self<block_start><return>int(self).__hash__()<block_end><block_end># Unsuported # object.__lshift__(self, other) # object.__rshift__(self, other) # object.__and__(self, other) # object.__xor__(self, other) # object.__or__(self, other) # ---------- Tests ------------ <def_stmt>test_bn_constructors <block_start><assert_stmt>Bn.from_decimal("100")<eq>100<assert_stmt>Bn.from_decimal("-100")<eq>-100<with_stmt>pytest.raises(Exception)<as>excinfo<block_start>Bn.from_decimal("100ABC")<block_end><assert_stmt>'BN Error'<in>str(excinfo.value)<with_stmt>pytest.raises(Exception)<as>excinfo<block_start>Bn.from_hex("100ABCZ")<block_end><assert_stmt>'BN Error'<in>str(excinfo.value)<assert_stmt>Bn.from_hex(Bn(-100).hex())<eq>-100<assert_stmt>Bn(15).hex()<eq>Bn(15).hex()<with_stmt>pytest.raises(Exception)<as>excinfo<block_start>Bn(-100).binary()<block_end><assert_stmt>'negative'<in>str(excinfo.value)<line_sep>#assert Bn.from_binary(Bn(-100).binary()) == 100 <assert_stmt>Bn.from_binary(Bn(100).binary())<eq>Bn(100)<assert_stmt>Bn.from_binary(Bn(100).binary())<eq>100<with_stmt>pytest.raises(Exception)<as>excinfo<block_start>s=10<power>10<line_sep>Bn(s)<block_end><assert_stmt>'does not fit'<in>str(excinfo.value)<with_stmt>pytest.raises(Exception)<as>excinfo<block_start>_check(<false>)<block_end><assert_stmt>'BN'<in>str(excinfo.value)<line_sep>#assert Bn.from_binary(Bn(-100).binary()) != Bn(50) <assert_stmt>int(Bn(-100))<eq>-100<assert_stmt>repr(Bn(5))<eq>Bn(5).repr()<eq>"5"<assert_stmt>range(10)[Bn(4)]<eq>4<line_sep>d={Bn(5):5 Bn(6):6}<assert_stmt>Bn(5)<in>d<block_end><def_stmt>test_bn_prime <block_start>p=Bn.get_prime(128)<assert_stmt>p<g>Bn(0)<assert_stmt>p.is_prime()<assert_stmt><not>Bn(16).is_prime()<assert_stmt>p.num_bits()<g>127<block_end><def_stmt>test_bn_arithmetic <block_start><assert_stmt>(Bn(1)+Bn(1)<eq>Bn(2))<assert_stmt>(Bn(1).int_add(Bn(1))<eq>Bn(2))<assert_stmt>(Bn(1)+1<eq>Bn(2))<line_sep># assert (1 + Bn(1) == Bn(2)) <assert_stmt>(Bn(1)+Bn(-1)<eq>Bn(0))<assert_stmt>(Bn(10)+Bn(10)<eq>Bn(20))<assert_stmt>(Bn(-1)<times>Bn(-1)<eq>Bn(1))<assert_stmt>(Bn(-1).int_mul(Bn(-1))<eq>Bn(1))<assert_stmt>(Bn(10)<times>Bn(10)<eq>Bn(100))<assert_stmt>(Bn(10)-Bn(10)<eq>Bn(0))<assert_stmt>(Bn(10)-Bn(100)<eq>Bn(-90))<assert_stmt>(Bn(10)+(-Bn(10))<eq>Bn(0))<line_sep>s=-Bn(100)<assert_stmt>(Bn(10)+s<eq>Bn(-90))<assert_stmt>(Bn(10)-(-Bn(10))<eq>Bn(20))<assert_stmt>-Bn(-10)<eq>10<assert_stmt>Bn(-10).int_neg()<eq>10<assert_stmt>divmod(Bn(10) Bn(3))<eq>(Bn(3) Bn(1))<assert_stmt>Bn(10).divmod(Bn(3))<eq>(Bn(3) 
Bn(1))<assert_stmt>Bn(10)/Bn(3)<eq>Bn(3)<assert_stmt>Bn(10)<floordiv>Bn(3)<eq>Bn(3)<assert_stmt>Bn(10).int_div(Bn(3))<eq>Bn(3)<assert_stmt>Bn(10)%Bn(3)<eq>Bn(1)<assert_stmt>Bn(10).mod(Bn(3))<eq>Bn(1)<assert_stmt>Bn(2)<power>Bn(8)<eq>Bn(2<power>8)<assert_stmt>pow(Bn(2) Bn(8) Bn(27))<eq>Bn(2<power>8%27)<line_sep>pow(Bn(10) Bn(10)).binary()<assert_stmt>pow(Bn(2) 8 27)<eq>2<power>8%27<assert_stmt>Bn(3).mod_inverse(16)<eq>11<with_stmt>pytest.raises(Exception)<as>excinfo<block_start>Bn(3).mod_inverse(0)<line_sep>print("Got inverse")<block_end><assert_stmt>'No inverse'<in>str(excinfo.value)<with_stmt>pytest.raises(Exception)<as>excinfo<block_start>x=Bn(0).mod_inverse(Bn(13))<line_sep>print("!!! Got inverse" x)<block_end><assert_stmt>'No inverse'<in>str(excinfo.value)<line_sep># with pytest.raises(Exception) as excinfo: # x = Bn(0).mod_inverse(Bn(13)) # print("Got inverse", x) #assert 'No inverse' in str(excinfo.value) <assert_stmt>Bn(10).mod_add(10 15)<eq>(10+10)%15<assert_stmt>Bn(10).mod_sub(100 15)<eq>(10-100)%15<assert_stmt>Bn(10).mod_mul(10 15)<eq>(10<times>10)%15<assert_stmt>Bn(-1).bool()<block_end><def_stmt>test_bn_right_arithmetic <block_start><assert_stmt>(1+Bn(1)<eq>Bn(2))<assert_stmt>(-1<times>Bn(-1)<eq>Bn(1))<assert_stmt>(10<times>Bn(10)<eq>Bn(100))<assert_stmt>(10-Bn(10)<eq>Bn(0))<assert_stmt>(10-Bn(100)<eq>Bn(-90))<assert_stmt>(10+(-Bn(10))<eq>Bn(0))<line_sep>s=-Bn(100)<assert_stmt>(10+s<eq>Bn(-90))<assert_stmt>(10-(-Bn(10))<eq>Bn(20))<assert_stmt>divmod(10 Bn(3))<eq>(Bn(3) Bn(1))<assert_stmt>10/Bn(3)<eq>Bn(3)<assert_stmt>10<floordiv>Bn(3)<eq>Bn(3)<assert_stmt>10%Bn(3)<eq>Bn(1)<assert_stmt>2<power>Bn(8)<eq>Bn(2<power>8)<assert_stmt>100<eq>Bn(100)<line_sep>pow(10 Bn(10))<block_end><def_stmt>test_bn_allocate # Test allocation <block_start>n0=Bn(10)<assert_stmt><true><assert_stmt>str(Bn())<eq>"0"<assert_stmt>str(Bn(1))<eq>"1"<assert_stmt>str(Bn(-1))<eq>"-1"<assert_stmt>Bn(15).hex()<eq>"0F"<assert_stmt>Bn(-15).hex()<eq>"-0F"<assert_stmt>int(Bn(5))<eq>5<assert_stmt>Bn(5).int()<eq>5<assert_stmt>0<le>Bn(15).random()<l>15<line_sep># Test copy o0=copy(n0)<line_sep>o1=deepcopy(n0)<assert_stmt>o0<eq>n0<assert_stmt>o1<eq>n0<line_sep># Test nonzero <assert_stmt><not>Bn()<assert_stmt><not>Bn(0)<assert_stmt>Bn(1)<assert_stmt>Bn(100)<block_end><def_stmt>test_bn_cmp <block_start><assert_stmt>Bn(1)<l>Bn(2)<assert_stmt>Bn(1)<le>Bn(2)<assert_stmt>Bn(2)<le>Bn(2)<assert_stmt>Bn(2)<eq>Bn(2)<assert_stmt><not>Bn(2)<eq><none><assert_stmt>Bn(2)<le>Bn(3)<assert_stmt>Bn(2)<l>Bn(3)<block_end><def_stmt>test_extras <block_start>two=Bn(2)<line_sep>two2=two.copy()<assert_stmt>two<eq>two2<block_end><def_stmt>test_odd <block_start><assert_stmt>Bn(1).is_odd()<assert_stmt>Bn(1).is_bit_set(0)<assert_stmt><not>Bn(1).is_bit_set(1)<assert_stmt>Bn(3).is_odd()<assert_stmt>Bn(3).is_bit_set(0)<assert_stmt>Bn(3).is_bit_set(1)<assert_stmt><not>Bn(0).is_odd()<assert_stmt><not>Bn(2).is_odd()<assert_stmt>Bn(100).is_bit_set(Bn(100).num_bits()-1)<block_end><def_stmt>test_check <block_start><with_stmt>pytest.raises(Exception)<as>excinfo<block_start>_check(<false>)<block_end><assert_stmt>'BN'<in>str(excinfo.value)<with_stmt>pytest.raises(Exception)<as>excinfo<block_start>_check(-1)<block_end><assert_stmt>'BN'<in>str(excinfo.value)<with_stmt>pytest.raises(Exception)<as>excinfo<block_start>_check(0)<block_end><assert_stmt>'BN'<in>str(excinfo.value)<block_end><def_stmt>test_timing_exp 
<block_start>p=Bn.from_decimal("158261031819091141711717027498980088325079888681498417129323009913367867128038610210948802263526234270043507882496188624614467036250990588401775690578042934008692254417273606807265961724843618743242066301529332478013432957153823449143202719186309012133210922613102725038632605463022887306439116579645787938883")<line_sep>psmall=Bn.from_decimal("90123082853250477832412338337738008391831682960497136029451532639902615425459")<line_sep>xs=[p.random()<for>_ range(1000)]<line_sep>ys=[p.random()<for>_ range(1000)]<import_stmt>time<line_sep>print<line_sep>t0=time.time()<line_sep>X=[xi.mod_mul(yi psmall)<for>(xi yi) zip(xs ys)]<line_sep>t1=time.time()<line_sep>print("Mod_mul time: %.2fms"%((t1-t0)<times>1000.0/1000.0))<line_sep>t0=time.time()<line_sep>X=[xi.pow(yi p)<for>(xi yi) zip(xs ys)]<line_sep>t1=time.time()<line_sep>print(" Pow time: %.2fms"%((t1-t0)<times>1000.0/1000.0))<line_sep>ctx=BnCtx()<line_sep>t0=time.time()<line_sep>X=[xi.pow(yi p ctx)<for>(xi yi) zip(xs ys)]<line_sep>t1=time.time()<line_sep>print("Pow ctx time: %.2fms"%((t1-t0)<times>1000.0/1000.0))<block_end>
<import_from_stmt>angrmanagement.config Conf<import_from_stmt>..base_plugin BasePlugin<try_stmt><block_start><import_from_stmt>slacrs Slacrs<import_from_stmt>slacrs.model VariableRename FunctionRename ReverseEngineeringProgress<block_end><except_stmt>ImportError<as>ex<block_start>Slacrs=<none># type: Optional[type] VariableRename=<none># type: Optional[type] FunctionRename=<none># type: Optional[type] ReverseEngineeringProgress=<none><block_end># type: Optional[type] <class_stmt>LogReverseEngineeringPlugin(BasePlugin)<block_start>""" Plugin for logging the reverse engineering of a program """<def_stmt>__init__ self workspace<block_start><if_stmt><not>Slacrs<block_start><raise>Exception("Please install Slacrs to Initialize LogReverseEngineering Plugin")<block_end>super().__init__(workspace)<line_sep>self.session=Slacrs(database=Conf.checrs_backend_str).session()<line_sep>self.project=(self.workspace.instance.img_name<if>self.workspace.instance.img_name<else>self.workspace.instance.project.filename)<block_end><def_stmt>handle_variable_rename self func offset:int old_name:str new_name:str type_:str size:int<block_start>""" Logic to check if the same variable has already been renamed, if not add to the current session. """<if_stmt>offset<block_start>new_name=old_name<block_end>variable_rename=(self.session.query(VariableRename).filter(VariableRename.project<eq>self.project VariableRename.function<eq>func._name VariableRename.variable<eq>old_name ).first())<if_stmt>variable_rename<block_start>self.session.delete(variable_rename)<block_end>variable_rename=VariableRename()<line_sep>variable_rename.project=self.project<line_sep>variable_rename.function=func._name<line_sep>variable_rename.variable=new_name<line_sep>self.session.add(variable_rename)<block_end># <def_stmt>handle_function_rename self func old_name:str new_name:str<block_start>""" Logic to check if the same Function has already been renamed, if not add to the current session. """<line_sep>function_rename=(self.session.query(FunctionRename).filter(FunctionRename.project<eq>self.project FunctionRename.function<eq>old_name ).first())<if_stmt>old_name.startswith("sub")<or>function_rename<block_start><if_stmt>function_rename<block_start>self.session.delete(function_rename)<block_end>function_rename=FunctionRename()<line_sep>function_rename.project=self.project<line_sep>function_rename.function=new_name<line_sep>self.update_function_name(old_name new_name)<line_sep>self.session.add(function_rename)<block_end><block_end><def_stmt>handle_project_save self file_name:str<block_start>""" Commit the current session only when user saves the project, uncommitted session objects will be discarded at teardown. 
"""<line_sep>variables_renamed_count=len(self.session.query(VariableRename).filter(VariableRename.project<eq>self.project).all())<line_sep>total_variables_count=len(self.workspace.instance.project.kb.variables.global_manager._variables)<line_sep>reverse_eng_progress=(self.session.query(ReverseEngineeringProgress).filter(ReverseEngineeringProgress.project<eq>self.project).first())<if_stmt><not>reverse_eng_progress<block_start>reverse_eng_progress=ReverseEngineeringProgress()<line_sep>self.session.add(reverse_eng_progress)<block_end>reverse_eng_progress.project=self.project<line_sep>reverse_eng_progress.variables_renamed=variables_renamed_count<line_sep>reverse_eng_progress.total_variables=total_variables_count<line_sep>(reverse_eng_progress.functions_renamed reverse_eng_progress.total_functions )=self.get_function_rename_stats()<line_sep>self.session.commit()<block_end><def_stmt>update_function_name self old_name new_name<block_start>""" To update the function names for all variable_rename if function gets renamed. """<line_sep>variables_renamed=self.session.query(VariableRename).filter(VariableRename.project<eq>self.project VariableRename.function<eq>old_name)<for_stmt>obj variables_renamed<block_start>obj.function=new_name<block_end><block_end><def_stmt>get_function_rename_stats self<block_start>functions_renamed=[func.function<for>func self.session.query(FunctionRename).filter(FunctionRename.project<eq>self.project).all()]<line_sep>functions_renamed_count=0<line_sep>total_functions_count=0<for_stmt>key self.workspace.instance.project.kb.functions._function_map<block_start><if_stmt>(self.workspace.instance.project.kb.functions._function_map[key]._name<in>functions_renamed)<block_start>functions_renamed_count=functions_renamed_count+1<line_sep>total_functions_count=total_functions_count+1<block_end><elif_stmt>self.workspace.instance.project.kb.functions._function_map[key]._name.startswith("sub")<block_start>total_functions_count=total_functions_count+1<block_end><block_end><return>[functions_renamed_count total_functions_count]<block_end><def_stmt>teardown self<block_start>self.session.close()<block_end><block_end>
# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-04-29 10:20 <import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('pretixbase' '0052_team_teaminvite') ]<line_sep>operations=[migrations.RemoveField(model_name='eventpermission' name='event' ) migrations.RemoveField(model_name='eventpermission' name='user' ) migrations.RemoveField(model_name='organizerpermission' name='organizer' ) migrations.RemoveField(model_name='organizerpermission' name='user' ) migrations.RemoveField(model_name='event' name='permitted' ) migrations.RemoveField(model_name='organizer' name='permitted' ) migrations.AlterField(model_name='team' name='can_change_teams' field=models.BooleanField(default=<false> verbose_name='Can change teams and permissions') ) migrations.AlterField(model_name='team' name='limit_events' field=models.ManyToManyField(blank=<true> to='pretixbase.Event' verbose_name='Limit to events') ) migrations.DeleteModel(name='EventPermission' ) migrations.DeleteModel(name='OrganizerPermission' ) ]<block_end>
<import_stmt>pytest<import_from_stmt>cryptoadvance.specter.util.xpub convert_xpub_prefix get_xpub_fingerprint <line_sep>### Tests for xpub <def_stmt>test_convert_to_ypub ghost_machine_xpub_49 ghost_machine_ypub<block_start>new_prefix=b"\x04\x9d\x7c\xb2"<assert_stmt>convert_xpub_prefix(ghost_machine_xpub_49 new_prefix)<eq>ghost_machine_ypub<block_end><def_stmt>test_convert_to_zpub ghost_machine_xpub_84 ghost_machine_zpub<block_start>new_prefix=b"\x04\xb2\x47\x46"<assert_stmt>convert_xpub_prefix(ghost_machine_xpub_84 new_prefix)<eq>ghost_machine_zpub<block_end><def_stmt>test_convert_ypub_back ghost_machine_ypub ghost_machine_xpub_49<block_start>new_prefix=b"\x04\x88\xb2\x1e"<assert_stmt>convert_xpub_prefix(ghost_machine_ypub new_prefix)<eq>ghost_machine_xpub_49<block_end><def_stmt>test_convert_zpub_back ghost_machine_zpub ghost_machine_xpub_84<block_start>new_prefix=b"\x04\x88\xb2\x1e"<assert_stmt>convert_xpub_prefix(ghost_machine_zpub new_prefix)<eq>ghost_machine_xpub_84<block_end><def_stmt>test_convert_to_upub ghost_machine_tpub_49 ghost_machine_upub<block_start>new_prefix=b"\x04\x4a\x52\x62"<assert_stmt>convert_xpub_prefix(ghost_machine_tpub_49 new_prefix)<eq>ghost_machine_upub<block_end><def_stmt>test_convert_to_vpub ghost_machine_tpub_84 ghost_machine_vpub<block_start>new_prefix=b"\x04\x5f\x1c\xf6"<assert_stmt>convert_xpub_prefix(ghost_machine_tpub_84 new_prefix)<eq>ghost_machine_vpub<block_end><def_stmt>test_get_xpub_fingerprint ghost_machine_xpub_44# fingerprint from https://jlopp.github.io/xpub-converter/ <block_start><assert_stmt>get_xpub_fingerprint(ghost_machine_xpub_44).hex()<eq>"81f802e3"<block_end>
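# Illustrative reference (not part of the test suite above): the 4-byte SLIP-132/BIP32 version prefixes exercised by these tests, gathered in one place; the commented call uses a hypothetical xpub value. PREFIXES={'xpub':b"\x04\x88\xb2\x1e" 'ypub':b"\x04\x9d\x7c\xb2" 'zpub':b"\x04\xb2\x47\x46" 'upub':b"\x04\x4a\x52\x62" 'vpub':b"\x04\x5f\x1c\xf6"}<line_sep># converted = convert_xpub_prefix(some_xpub, PREFIXES['zpub'])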
<import_stmt>os<import_from_stmt>juriscraper.pacer.email NotificationEmail S3NotificationEmail<import_from_stmt>tests TESTS_ROOT_EXAMPLES_PACER<import_from_stmt>tests.local.PacerParseTestCase PacerParseTestCase<line_sep>TESTS_ROOT_EXAMPLES_PACER_NEF=os.path.join(TESTS_ROOT_EXAMPLES_PACER "nef")<line_sep>TESTS_ROOT_EXAMPLES_PACER_NEF_S3=os.path.join(TESTS_ROOT_EXAMPLES_PACER "nef/s3")<class_stmt>PacerNotificationEmailTest(PacerParseTestCase)<block_start><def_stmt>setUp self<block_start>self.maxDiff=200000<block_end><def_stmt>test_notification_emails self<block_start>self.parse_files(TESTS_ROOT_EXAMPLES_PACER_NEF "*.html" NotificationEmail)<block_end><block_end><class_stmt>S3PacerNotificationEmailTest(PacerParseTestCase)<block_start><def_stmt>setUp self<block_start>self.maxDiff=200000<block_end><def_stmt>test_notification_emails_s3 self<block_start>self.parse_files(TESTS_ROOT_EXAMPLES_PACER_NEF_S3 "*.txt" S3NotificationEmail)<block_end><block_end>
# Copyright 2019 The Matrix.org Foundation CIC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>attr<line_sep>@attr.s<class_stmt>Message<block_start><pass><block_end>@attr.s<class_stmt>UnverifiedDevicesSignal(Message)<block_start>pan_user=attr.ib()<line_sep>room_id=attr.ib()<line_sep>room_display_name=attr.ib()<block_end>@attr.s<class_stmt>UnverifiedResponse(Message)<block_start>message_id=attr.ib()<line_sep>pan_user=attr.ib()<line_sep>room_id=attr.ib()<block_end>@attr.s<class_stmt>SendAnywaysMessage(UnverifiedResponse)<block_start><pass><block_end>@attr.s<class_stmt>CancelSendingMessage(UnverifiedResponse)<block_start><pass><block_end>@attr.s<class_stmt>KeyRequestMessage(Message)<block_start>pan_user=attr.ib(type=str)<line_sep>event=attr.ib()<block_end>@attr.s<class_stmt>_KeyShare(Message)<block_start>message_id=attr.ib()<line_sep>pan_user=attr.ib()<line_sep>user_id=attr.ib()<line_sep>device_id=attr.ib()<block_end>@attr.s<class_stmt>ContinueKeyShare(_KeyShare)<block_start><pass><block_end>@attr.s<class_stmt>CancelKeyShare(_KeyShare)<block_start><pass><block_end>@attr.s<class_stmt>DaemonResponse(Message)<block_start>message_id=attr.ib()<line_sep>pan_user=attr.ib()<line_sep>code=attr.ib()<line_sep>message=attr.ib()<block_end>@attr.s<class_stmt>UpdateUsersMessage(Message)<block_start>server=attr.ib()<line_sep>user_id=attr.ib()<line_sep>device_id=attr.ib()<block_end>@attr.s<class_stmt>UpdateDevicesMessage(Message)<block_start>pan_user=attr.ib(type=str)<line_sep>devices=attr.ib(type=dict)<block_end>@attr.s<class_stmt>_KeysOperation(Message)<block_start>message_id=attr.ib()<line_sep>pan_user=attr.ib()<line_sep>file_path=attr.ib()<line_sep>passphrase=attr.ib()<block_end>@attr.s<class_stmt>ImportKeysMessage(_KeysOperation)<block_start><pass><block_end>@attr.s<class_stmt>ExportKeysMessage(_KeysOperation)<block_start><pass><block_end>@attr.s<class_stmt>_VerificationMessage(Message)<block_start>message_id=attr.ib()<line_sep>pan_user=attr.ib()<line_sep>user_id=attr.ib()<line_sep>device_id=attr.ib()<block_end>@attr.s<class_stmt>DeviceVerifyMessage(_VerificationMessage)<block_start><pass><block_end>@attr.s<class_stmt>DeviceUnverifyMessage(_VerificationMessage)<block_start><pass><block_end>@attr.s<class_stmt>DeviceBlacklistMessage(_VerificationMessage)<block_start><pass><block_end>@attr.s<class_stmt>DeviceUnblacklistMessage(_VerificationMessage)<block_start><pass><block_end>@attr.s<class_stmt>SasMessage(_VerificationMessage)<block_start><pass><block_end>@attr.s<class_stmt>StartSasMessage(SasMessage)<block_start><pass><block_end>@attr.s<class_stmt>CancelSasMessage(SasMessage)<block_start><pass><block_end>@attr.s<class_stmt>ConfirmSasMessage(SasMessage)<block_start><pass><block_end>@attr.s<class_stmt>AcceptSasMessage(SasMessage)<block_start><pass><block_end>@attr.s<class_stmt>_SasSignal<block_start>pan_user=attr.ib()<line_sep>user_id=attr.ib()<line_sep>device_id=attr.ib()<line_sep>transaction_id=attr.ib()<block_end>@attr.s<class_stmt>InviteSasSignal(_SasSignal)<block_start><pass><block_end>@attr.s<class_stmt>ShowSasSignal(_SasSignal)<block_start>emoji=attr.ib()<block_end>@attr.s<class_stmt>SasDoneSignal(_SasSignal)<block_start><pass><block_end>
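# Illustrative sketch (not from the original module): the attr-decorated classes above are plain data carriers, so constructing one just fills its fields; the identifiers here are hypothetical. msg=UpdateDevicesMessage(pan_user="@pan:example.org" devices={})<line_sep>print(msg.pan_user msg.devices)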
# -*- coding: utf-8 -*- <import_stmt>os<import_stmt>json<import_stmt>textwrap<import_stmt>unittest<import_from_stmt>EulerPy.problem Problem<import_from_stmt>EulerPy.utils human_time<line_sep>EULER_DIR=os.path.dirname(os.path.dirname(__file__))<line_sep>EULER_DATA=os.path.join(EULER_DIR 'EulerPy' 'data')<class_stmt>EulerPyUtils(unittest.TestCase)<block_start><def_stmt>test_problem_format self<block_start>""" Ensure each parsed problem only contains one problem (that one problem does not "bleed" into the next one due to an issue with line breaks) """<line_sep># Determine largest problem in problems.txt problems_file=os.path.join(EULER_DATA 'problems.txt')<with_stmt>open(problems_file)<as>f<block_start><for_stmt>line f<block_start><if_stmt>line.startswith('Problem ')<block_start>largest_problem=line.split(' ')[1]<block_end><block_end><block_end><for_stmt>problem range(1 int(largest_problem)+1)<block_start>problemText=Problem(problem).text<line_sep>msg="Error encountered when parsing problem {}.".format(problem)<line_sep>self.assertFalse('========='<in>problemText msg=msg)<line_sep>self.assertFalse('\n\n\n'<in>problemText msg=msg)<block_end><block_end><def_stmt>test_expected_problem self<block_start>"""Check that problem #1 returns the correct problem text"""<line_sep>problem_one=textwrap.dedent(""" If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23. Find the sum of all the multiples of 3 or 5 below 1000. """)<line_sep>self.assertEqual(problem_one.strip() Problem(1).text)<block_end><def_stmt>test_filename_format self<block_start>"""Check that filenames are being formatted correctly"""<line_sep>self.assertEqual(Problem(1).filename() "001.py")<line_sep>self.assertEqual(Problem(10).filename() "010.py")<line_sep>self.assertEqual(Problem(100).filename() "100.py")<block_end><def_stmt>test_time_format self<block_start>self.assertEqual(human_time(100000) '1d 3h 46m 40s')<block_end><def_stmt>test_problem_resources self<block_start>"""Ensure resources in `/data` match `resources.json`"""<line_sep>resources_path=os.path.join(EULER_DATA 'resources')<def_stmt>_resource_check filename seen_files<block_start>path=os.path.join(resources_path filename)<line_sep># Check that resource exists in `/data` self.assertTrue(os.path.isfile(path) '%s does not exist.'%filename)<line_sep># Add resource to set `seen_files` seen_files.add(filename)<block_end><with_stmt>open(os.path.join(EULER_DATA 'resources.json'))<as>f<block_start>resource_dict=json.load(f)<block_end>seen_files=set()<for_stmt>item (v<for>k,v resource_dict.items())<block_start><if_stmt>isinstance(item list)<block_start><for_stmt>subitem item<block_start>_resource_check(subitem seen_files)<block_end><block_end><else_stmt><block_start>_resource_check(item seen_files)<block_end><block_end>self.assertEqual(seen_files set(os.listdir(resources_path)))<block_end><block_end>
<import_stmt>pytest<line_sep>pytest.importorskip("yubico")<def_stmt>test_load_module <block_start>__import__("modules.contrib.yubikey")<block_end>
<import_from_stmt>django.conf settings<import_from_stmt>django.utils.unittest skipIf<def_stmt>skipIfCustomUser test_func<block_start>""" Skip a test if a custom user model is in use. """<line_sep><return>skipIf(settings.AUTH_USER_MODEL<ne>'auth.User' 'Custom user model in use')(test_func)<block_end>
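# Illustrative usage sketch (hypothetical test case, not part of this module): applied to a test method, the decorator skips it whenever AUTH_USER_MODEL points at anything other than auth.User. <import_from_stmt>django.test TestCase<class_stmt>ExampleUserTests(TestCase)<block_start>@skipIfCustomUser<def_stmt>test_default_user_model self<block_start>self.assertEqual(settings.AUTH_USER_MODEL 'auth.User')<block_end><block_end>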
# some unit tests for the bytecode decoding <import_stmt>py<import_from_stmt>rpython.jit.metainterp pyjitpl<import_from_stmt>rpython.jit.metainterp jitprof<import_from_stmt>rpython.jit.metainterp.history ConstInt<import_from_stmt>rpython.jit.metainterp.history History IntFrontendOp<import_from_stmt>rpython.jit.metainterp.resoperation ResOperation rop InputArgInt<import_from_stmt>rpython.jit.metainterp.optimizeopt.util equaloplists<import_from_stmt>rpython.jit.codewriter.jitcode JitCode<def_stmt>test_portal_trace_positions <block_start>py.test.skip("bleh, too direct test, rewrite or kill")<class_stmt>jitdriver_sd<block_start>index=0<class_stmt>warmstate<block_start>@staticmethod<def_stmt>get_unique_id *args<block_start><return>0<block_end><block_end><class_stmt>jitdriver<block_start>is_recursive=<true><block_end><block_end>jitcode=JitCode("f")<line_sep>jitcode.setup(<none>)<line_sep>portal=JitCode("portal")<line_sep>portal.jitdriver_sd=jitdriver_sd<line_sep>portal.setup(<none>)<class_stmt>FakeStaticData<block_start>cpu=<none><line_sep>warmstate=<none><line_sep>warmrunnerdesc=<none><line_sep>mainjitcode=portal<block_end>metainterp=pyjitpl.MetaInterp(FakeStaticData() FakeStaticData())<line_sep>metainterp.framestack=[]<class_stmt>FakeHistory<block_start>operations=[]<line_sep>@staticmethod<def_stmt>record *args<block_start><pass><block_end><block_end>history=metainterp.history=FakeHistory()<line_sep>metainterp.newframe(portal "green1")<line_sep>history.operations.append(1)<line_sep>metainterp.newframe(jitcode)<line_sep>history.operations.append(2)<line_sep>metainterp.newframe(portal "green2")<line_sep>history.operations.append(3)<line_sep>metainterp.popframe()<line_sep>history.operations.append(4)<line_sep>metainterp.popframe()<line_sep>history.operations.append(5)<line_sep>metainterp.popframe()<line_sep>history.operations.append(6)<assert_stmt>metainterp.portal_trace_positions<eq>[("green1" 0) ("green2" 2) (<none> 3) (<none> 5)]<assert_stmt>metainterp.find_biggest_function()<eq>"green1"<line_sep>metainterp.newframe(portal "green3")<line_sep>history.operations.append(7)<line_sep>metainterp.newframe(jitcode)<line_sep>history.operations.append(8)<assert_stmt>metainterp.portal_trace_positions<eq>[("green1" 0) ("green2" 2) (<none> 3) (<none> 5) ("green3" 6)]<assert_stmt>metainterp.find_biggest_function()<eq>"green1"<line_sep>history.operations.extend([9 10 11 12])<assert_stmt>metainterp.find_biggest_function()<eq>"green3"<block_end><def_stmt>test_remove_consts_and_duplicates <block_start><class_stmt>FakeStaticData<block_start>cpu=<none><line_sep>all_descrs=[]<line_sep>warmrunnerdesc=<none><block_end><def_stmt>is_another_box_like box referencebox<block_start><assert_stmt>box<is><not>referencebox<assert_stmt>box.type<eq>referencebox.type<assert_stmt>box.getint()<eq>referencebox.getint()<line_sep><return><true><block_end>metainterp=pyjitpl.MetaInterp(FakeStaticData() <none>)<line_sep>metainterp.history=History()<line_sep>b1=IntFrontendOp(1)<line_sep>b1.setint(1)<line_sep>b2=IntFrontendOp(2)<line_sep>b2.setint(2)<line_sep>c3=ConstInt(3)<line_sep>boxes=[b1 b2 b1 c3]<line_sep>dup={}<line_sep>metainterp.history.set_inputargs([b1 b2] FakeStaticData())<line_sep>metainterp.remove_consts_and_duplicates(boxes 4 dup)<assert_stmt>boxes[0]<is>b1<assert_stmt>boxes[1]<is>b2<assert_stmt>is_another_box_like(boxes[2] b1)<assert_stmt>is_another_box_like(boxes[3] c3)<line_sep>inp,operations=metainterp.history.trace.unpack()<line_sep>remap=dict(zip([b1 b2] inp))<assert_stmt>equaloplists(operations 
[ResOperation(rop.SAME_AS_I [b1]) ResOperation(rop.SAME_AS_I [c3]) ] remap=remap)<assert_stmt>dup<eq>{b1:<none> b2:<none>}<line_sep># <block_end><def_stmt>test_get_name_from_address <block_start><class_stmt>FakeMetaInterpSd(pyjitpl.MetaInterpStaticData)<block_start><def_stmt>__init__ self<block_start><pass><block_end><block_end>metainterp_sd=FakeMetaInterpSd()<line_sep>metainterp_sd.setup_list_of_addr2name([(123 'a') (456 'b')])<assert_stmt>metainterp_sd.get_name_from_address(123)<eq>'a'<assert_stmt>metainterp_sd.get_name_from_address(456)<eq>'b'<assert_stmt>metainterp_sd.get_name_from_address(789)<eq>''<block_end>
<import_from_stmt>django.conf.urls.static static<import_from_stmt>django.conf settings<import_from_stmt>django.urls path<import_from_stmt>django.contrib admin<import_from_stmt>django.conf.urls include<import_from_stmt>rest_framework permissions<import_from_stmt>drf_yasg.views get_schema_view<import_from_stmt>drf_yasg openapi<import_from_stmt>config.api api<import_from_stmt>.site DashboardSite<line_sep>admin.site=DashboardSite()<line_sep>admin.sites.site=admin.site<line_sep>admin.autodiscover()<line_sep>schema_view=get_schema_view(openapi.Info(title="Froggy's Service API" default_version='v1' contact=openapi.Contact(email=settings.SERVER_EMAIL) license=openapi.License(name="MIT License") ) url=settings.DOMAIN public=<false> permission_classes=(permissions.AllowAny ) )<line_sep>urlpatterns=[# All Kubernetes services must serve a 200 page at '/', set admin page as index path('' admin.site.urls name='admin') path('api/' include(api.urls)) path('api-auth/' include('rest_framework.urls' namespace='rest_framework')) path('api/swagger/' schema_view.with_ui('swagger' cache_timeout=0) name='schema-swagger') ]<if_stmt>settings.DEBUG<block_start>urlpatterns<augadd>static(settings.MEDIA_URL document_root=settings.MEDIA_ROOT)<line_sep>urlpatterns[0]=path('admin/' admin.site.urls name='admin')<block_end>
<import_stmt>clr<line_sep>clr.AddReference('RevitAPI')<import_from_stmt>Autodesk.Revit.DB *<line_sep>clr.AddReference("RevitNodes")<import_stmt>Revit<line_sep>clr.ImportExtensions(Revit.Elements)<line_sep>clr.AddReference("RevitServices")<import_stmt>RevitServices<import_from_stmt>RevitServices.Persistence DocumentManager<line_sep>doc=DocumentManager.Instance.CurrentDBDocument<line_sep>faminsts=UnwrapElement(IN[0])<line_sep>elementlist=list()<for_stmt>item faminsts<block_start><try_stmt><block_start>elementlist.append(item.Host.ToDSType(<true>))<block_end><except_stmt># if that doesn't work, maybe it's a WallSweep <block_start><try_stmt><block_start>hostidlist=list()<for_stmt>host item.GetHostIds()<block_start>hostidlist.append(doc.GetElement(host).ToDSType(<true>))<block_end>elementlist.append(hostidlist)<block_end><except_stmt><block_start>elementlist.append(list())<block_end><block_end><block_end>OUT=elementlist<line_sep>
"""add display week numbers Revision ID: 1bd634091036 Revises: <PASSWORD> Create Date: 2015-09-15 11:51:46.739150 """<line_sep># revision identifiers, used by Alembic. revision='1bd634091036'<line_sep>down_revision='<PASSWORD>'<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<def_stmt>upgrade <block_start>op.add_column('user' sa.Column('display_week_numbers' sa.Boolean))<block_end><def_stmt>downgrade <block_start>op.drop_column('user' 'display_week_numbers')<block_end>
''' Author: <NAME> Date: 8/17/2018 Description: Creates a dataframe with moving averages and MACD oscillator '''<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>datetime datetime timedelta<import_from_stmt>iexfinance get_historical_data<line_sep>moving_avg1=10<line_sep>moving_avg2=20<line_sep>ticker="BABA"<line_sep>now=datetime.now()<line_sep>start=now-timedelta(days=90)<line_sep>df=get_historical_data(ticker start=start end=now output_format='pandas')<def_stmt>macd dat<block_start>dat['10dma']=dat['close'].rolling(window=moving_avg1 min_periods=1).mean()<line_sep>dat['20dma']=dat['close'].rolling(window=moving_avg2 min_periods=1).mean()<line_sep><return>dat<block_end><def_stmt>add_macd df<block_start>df=macd(df)<line_sep>df['position']=0<line_sep>df['position'][moving_avg1:]=np.where(df['10dma'][moving_avg1:]<ge>df['20dma'][moving_avg1:] 1 0)<line_sep>df['signals']=df['position'].diff()<line_sep>df['oscillator']=df['10dma']-df['20dma']<line_sep><return>df<block_end>df=add_macd(df)<line_sep># print(df) print(df.loc[df['signals']<eq>1])<line_sep>print(df.loc[df['signals']<eq>-1])<line_sep>
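# Illustrative sketch (not in the original script): matplotlib is imported above but never used, so a minimal visualization of the close price, both moving averages, and the crossover points might look like this; it assumes the dataframe built above. plt.plot(df.index df['close'] label='close')<line_sep>plt.plot(df.index df['10dma'] label='10dma')<line_sep>plt.plot(df.index df['20dma'] label='20dma')<line_sep>plt.legend()<line_sep>plt.title(ticker)<line_sep>plt.show()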
<import_from_stmt>ff_client FfClient FfConfig FfRequest<import_stmt>unittest<import_stmt>logging<import_stmt>time<class_stmt>TestFfClientSinglePacket(unittest.TestCase)<block_start><def_stmt>test_create_request_packets_for_get_request self<block_start>client=FfClient(FfConfig(ip_address='127.0.0.1' port=8080 log_level=logging.DEBUG))<line_sep>http_request="GET / HTTP/1.1\nHost: google.com.au\n\n"<line_sep>packets=client.create_request_packets(http_request https=<false>)<line_sep>payload_options_length=11+3# Timestamp option + EOL option self.assertEqual(1 len(packets))<line_sep>packet1_buff=packets[0].payload<line_sep>packet1_len=packets[0].length<line_sep>ptr=0<line_sep>self.assertEqual(73 packet1_len)<line_sep># Request version self.assertEqual(FfRequest.Version.V1 packet1_buff[ptr]<lshift>8|packet1_buff[ptr+1])<line_sep>ptr<augadd>2<line_sep># Request ID self.assertNotEqual(0 (packet1_buff[ptr]<lshift>56|packet1_buff[ptr+1]<lshift>48|packet1_buff[ptr+2]<lshift>40|packet1_buff[ptr+3]<lshift>32|packet1_buff[ptr+4]<lshift>24|packet1_buff[ptr+5]<lshift>16|packet1_buff[ptr+6]<lshift>8|packet1_buff[ptr+7]))<line_sep>ptr<augadd>8<line_sep># Total length self.assertEqual(len(http_request)+payload_options_length (packet1_buff[ptr]<lshift>24|packet1_buff[ptr+1]<lshift>16|packet1_buff[ptr+2]<lshift>8|packet1_buff[ptr+3]))<line_sep>ptr<augadd>4<line_sep># Chunk offset self.assertEqual(0 (packet1_buff[ptr]<lshift>24|packet1_buff[ptr+1]<lshift>16|packet1_buff[ptr+2]<lshift>8|packet1_buff[ptr+3]))<line_sep>ptr<augadd>4<line_sep># Chunk length self.assertEqual(len(http_request)+payload_options_length (packet1_buff[ptr]<lshift>8|packet1_buff[ptr+1]))<line_sep>ptr<augadd>2<line_sep># Break option type self.assertEqual(FfRequest.Option.Type.BREAK packet1_buff[ptr])<line_sep>ptr<augadd>1<line_sep># Break option length self.assertEqual(0 packet1_buff[ptr]<lshift>16|packet1_buff[ptr+1])<line_sep>ptr<augadd>2<line_sep># Timestamp option type self.assertEqual(FfRequest.Option.Type.TIMESTAMP packet1_buff[ptr])<line_sep>ptr<augadd>1<line_sep># Timestamp option length self.assertEqual(8 packet1_buff[ptr]<lshift>16|packet1_buff[ptr+1])<line_sep>ptr<augadd>2<line_sep># Timestamp option value self.assertAlmostEqual(time.time() packet1_buff[ptr]<lshift>56|packet1_buff[ptr+1]<lshift>48|packet1_buff[ptr+2]<lshift>40|packet1_buff[ptr+3]<lshift>32|packet1_buff[ptr+4]<lshift>24|packet1_buff[ptr+5]<lshift>16|packet1_buff[ptr+6]<lshift>8|packet1_buff[ptr+7] delta=5)<line_sep>ptr<augadd>8<line_sep># EOL option type self.assertEqual(FfRequest.Option.Type.EOL packet1_buff[ptr])<line_sep>ptr<augadd>1<line_sep># EOL option length self.assertEqual(0 packet1_buff[ptr]<lshift>16|packet1_buff[ptr+1])<line_sep>ptr<augadd>2<line_sep># Payload self.assertEqual(bytearray(http_request.encode('utf8')) packet1_buff[ptr:packet1_len])<block_end><block_end>
<import_from_stmt>prompt_toolkit print_formatted_text<import_from_stmt>prompt_toolkit.formatted_text FormattedText<import_from_stmt>prompt_toolkit.styles Style<import_from_stmt>neo.UserPreferences preferences<import_stmt>os<import_stmt>sys<line_sep>token_style=Style.from_dict({"command":preferences.token_style['Command'] "neo":preferences.token_style['Neo'] "default":preferences.token_style['Default'] "number":preferences.token_style['Number'] })<class_stmt>PromptPrinter()<block_start><def_stmt>__init__ self<block_start>self.printer=self._internal_prompt_print<block_end><def_stmt>reset_printer self<block_start>self.printer=self._internal_prompt_print<block_end><def_stmt>_internal_prompt_print self *args **kwargs<block_start>kwargs['sep']=kwargs.pop('sep' ' ')<line_sep>kwargs['end']=kwargs.pop('end' '\n')<line_sep>kwargs['file']=kwargs.pop('file' sys.stdout)<line_sep>kwargs['style']=token_style<line_sep>frags=[]<for_stmt>a args<block_start><if_stmt>isinstance(a FormattedText)<block_start>frags.append(a)<block_end><else_stmt><block_start>frags.append(FormattedText([("class:command" str(a))]))<block_end><block_end>print_formatted_text(*frags **kwargs)<block_end><def_stmt>print self *args **kwargs<block_start><if_stmt>'NEOPYTHON_UNITTEST'<in>os.environ<block_start>print(*args **kwargs)<block_end><else_stmt><block_start>self.printer(*args **kwargs)<block_end><block_end><block_end>pp=PromptPrinter()<def_stmt>prompt_print *args **kwargs<block_start>pp.print(*args **kwargs)<block_end>
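# Illustrative usage sketch (not part of the module): plain arguments are styled as commands, while pre-built FormattedText fragments pass through with their own style classes; the values are hypothetical. prompt_print("Wallet height:" FormattedText([("class:number" "1234")]))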
<import_from_future_stmt> print_function division<line_sep># <import_stmt>sys os<line_sep>os.environ['KMP_DUPLICATE_LIB_OK']='True'# uncomment this line if omp error occurs on OSX for python 3 os.environ['OMP_NUM_THREADS']='1'# set number of OpenMP threads to run in parallel os.environ['MKL_NUM_THREADS']='1'# set number of MKL threads to run in parallel # quspin_path=os.path.join(os.getcwd() "../../")<line_sep>sys.path.insert(0 quspin_path)<line_sep>################################################################################ # example 24 # # This example shows how to use the `user_basis` to define Majorana operators. # ################################################################################ <import_from_stmt>quspin.operators hamiltonian# Hamiltonians and operators <import_from_stmt>quspin.basis spinless_fermion_basis_1d# Hilbert space fermion basis_1d <import_from_stmt>quspin.basis.user user_basis# Hilbert space user basis <import_from_stmt>quspin.basis.user next_state_sig_32 op_sig_32 map_sig_32 count_particles_sig_32# user basis data types signatures <import_from_stmt>numba carray cfunc jit# numba helper functions <import_from_stmt>numba uint32 int32# numba data types <import_stmt>numpy<as>np<import_from_stmt>scipy.special comb<line_sep>np.set_printoptions(suppress='True' precision=6)<line_sep># N=6# lattice sites # ############ create soinless fermion user basis object ############# # @jit(uint32(uint32 uint32) locals=dict(f_count=uint32 ) nopython=<true> nogil=<true>)<def_stmt>_count_particles_32 state site_ind# auxiliary function to count number of fermions, i.e. 1's in bit configuration of the state, up to site site_ind # CAUTION: 32-bit integers code only! <block_start>f_count=state&((0x7FFFFFFF)<rshift>(31-site_ind))<line_sep>f_count=f_count-((f_count<rshift>1)&0x55555555)<line_sep>f_count=(f_count&0x33333333)+((f_count<rshift>2)&0x33333333)<line_sep><return>(((f_count+(f_count<rshift>4))&0x0F0F0F0F)<times>0x01010101)<rshift>24<block_end># @cfunc(op_sig_32 locals=dict(s=int32 sign=int32 n=int32 b=uint32 f_count=uint32) )<def_stmt>op op_struct_ptr op_str site_ind N args# using struct pointer to pass op_struct_ptr back to C++ see numba Records <block_start>op_struct=carray(op_struct_ptr 1)[0]<line_sep>err=0<line_sep># site_ind=N-site_ind-1# convention for QuSpin for mapping from bits to sites. 
##### f_count=_count_particles_32(op_struct.state site_ind)<line_sep>##### sign=-1<if>f_count&1<else>1<line_sep>n=(op_struct.state<rshift>site_ind)&1# either 0 or 1 b=(1<lshift>site_ind)<line_sep># <if_stmt>op_str<eq>120# "x" is integer value 120 = ord("x") <block_start>op_struct.state<augxor>b<line_sep>op_struct.matrix_ele<augmul>sign<block_end><elif_stmt>op_str<eq>121# "y" is integer value 121 = ord("y") <block_start>op_struct.state<augxor>b<line_sep>op_struct.matrix_ele<augmul>-1.0j<times>sign<times>((n<lshift>1)-1)<block_end><elif_stmt>op_str<eq>43# "+" is integer value 43 = ord("+") <block_start>op_struct.matrix_ele<augmul>(0.0<if>n<else>sign)<line_sep>op_struct.state<augxor>b<block_end># create fermion <elif_stmt>op_str<eq>45# "-" is integer value 45 = ord("-") <block_start>op_struct.matrix_ele<augmul>(sign<if>n<else>0.0)<line_sep>op_struct.state<augxor>b<block_end># create fermion <elif_stmt>op_str<eq>110# "n" is integer value 110 = ord("n") <block_start>op_struct.matrix_ele<augmul>n<block_end><elif_stmt>op_str<eq>73# "I" is integer value 73 = ord("I") <block_start><pass><block_end><else_stmt><block_start>op_struct.matrix_ele=0<line_sep>err=-1<block_end># <return>err<block_end>op_args=np.array([] dtype=np.uint32)<line_sep># ###### define symmetry maps # @cfunc(map_sig_32 locals=dict(shift=uint32 xmax=uint32 x1=uint32 x2=uint32 period=int32 l=int32 f_count1=int32 f_count2=int32))<def_stmt>translation x N sign_ptr args<block_start>""" works for all system sizes N. """<line_sep>shift=args[0]# translate state by shift sites period=N# periodicity/cyclicity of translation xmax=args[1]<line_sep># l=(shift+period)%period<line_sep>x1=(x<rshift>(period-l))<line_sep>x2=((x<lshift>l)&xmax)<line_sep># ##### # count number of fermions, i.e. 1's in bit configuration of x1 f_count1=_count_particles_32(x1 period)<line_sep># count number of fermions, i.e. 1's in bit configuration of x2 f_count2=_count_particles_32(x2 period)<line_sep>##### # compute fermion sign sign_ptr[0]<augmul>(-1<if>((f_count1&1)&(f_count2&1)&1)<else>1)<line_sep># <return>(x2|x1)<block_end>T_args=np.array([1 (1<lshift>N)-1] dtype=np.uint32)<line_sep># @cfunc(map_sig_32 locals=dict(out=uint32 s=uint32 f_count=int32))<def_stmt>parity x N sign_ptr args<block_start>""" works for all system sizes N. """<line_sep>out=0<line_sep>s=args[0]<line_sep># ##### # count number of fermions, i.e. 1's in bit configuration of the state f_count=_count_particles_32(x N)<line_sep>##### sign_ptr[0]<augmul>(-1<if>((f_count&2)<and>1)<else>1)<line_sep># out<augxor>(x&1)<line_sep>x<augrshift>1<while_stmt>(x)<block_start>out<auglshift>1<line_sep>out<augxor>(x&1)<line_sep>x<augrshift>1<line_sep>s<augsub>1<block_end># out<auglshift>s<line_sep><return>out<block_end>P_args=np.array([N-1] dtype=np.uint32)<line_sep># ###### construct user_basis # define anti-commuting bits -- fermion signs on the integer bits (not sites!) that represent a fermion degree of freedom noncommuting_bits=[(np.arange(N) -1)]# fermion signs are counted w.r.t. the shift operator << # define maps dict maps=dict(T_block=(translation N 0 T_args) P_block=(parity 2 0 P_args) )<line_sep>#maps = dict(P_block=(parity,2,0,P_args), ) #maps = dict(T_block=(translation,N,0,T_args) ) op_dict=dict(op=op op_args=op_args)<line_sep># create user basis basis=user_basis(np.uint32 N op_dict allowed_ops=set("xy+-nI") sps=2 noncommuting_bits=noncommuting_bits **maps)<line_sep># # print(basis)<line_sep># ############ create and compare Hamiltonians ############# # ##### Hamiltonian using Majoranas # J=-np.sqrt(2.0)# hopping U=+1.0# nn interaction # hop_term_p=[[+0.5j<times>J j (j+1)%N]<for>j range(N)]<line_sep>hop_term_m=[[-0.5j<times>J j (j+1)%N]<for>j range(N)]<line_sep>density_term=[[+0.5j<times>U j j]<for>j range(N)]<line_sep>int_term=[[-0.25<times>U j j (j+1)%N (j+1)%N]<for>j range(N)]<line_sep>id_term=[[0.25<times>U j]<for>j range(N)]<line_sep># static=[['xy' hop_term_p] ['yx' hop_term_m] # kinetic energy ['I' id_term] ['xy' density_term] ['xyxy' int_term] # nn interaction energy ]<line_sep>dynamic=[]<line_sep># no_checks=dict(check_symm=<false> check_pcon=<false> check_herm=<false>)<line_sep>H_majorana=hamiltonian(static [] basis=basis dtype=np.float64 **no_checks)<line_sep># # ##### Hamiltonian using complex fermions # # hopping_pm=[[+J j (j+1)%N]<for>j range(N)]<line_sep>hopping_mp=[[-J j (j+1)%N]<for>j range(N)]<line_sep>nn_int=[[U j (j+1)%N]<for>j range(N)]<line_sep># static=[["+-" hopping_pm] ["-+" hopping_mp] ["nn" nn_int]]<line_sep>dynamic=[]<line_sep># no_checks=dict(check_symm=<false> check_pcon=<false> check_herm=<false>)<line_sep>H=hamiltonian(static [] basis=basis dtype=np.float64 **no_checks)<line_sep>print(H.toarray())<line_sep>print()<line_sep>print(H_majorana.toarray())<line_sep>print()<line_sep>print(np.linalg.norm((H-H_majorana).toarray()))<line_sep>
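# Illustrative check (not in the original example): since the Majorana and complex-fermion constructions describe the same model, their spectra should agree, not just their matrix elements; this assumes the dense 2**N-dimensional matrices fit in memory, which is trivially true for N=6. E_fermion=np.linalg.eigvalsh(H.toarray())<line_sep>E_majorana=np.linalg.eigvalsh(H_majorana.toarray())<line_sep>print(np.allclose(E_fermion E_majorana))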
<import_stmt>os<import_stmt>time<import_stmt>torch<import_stmt>queue<import_stmt>argparse<import_stmt>numpy<as>np<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>torch.optim<as>optim<import_from_stmt>utils.drivers train test get_dataloader<import_from_stmt>model.MobileNetV2 MobileNetV2 InvertedResidual<import_from_stmt>pruner.fp_mbnetv2 FilterPrunerMBNetV2<import_from_stmt>pruner.fp_resnet FilterPrunerResNet<class_stmt>LeGR<block_start><def_stmt>__init__ self dataset datapath model pruner rank_type='l2_weight' batch_size=32 lr=1e-3 safeguard=0 global_random_rank=<false> lub='' device='cuda'<block_start>self.device=device<line_sep>self.sample_for_ranking=1<if>rank_type<in>['l1_weight' 'l2_weight' 'l2_bn' 'l1_bn' 'l2_bn_param']<else>5000<line_sep>self.safeguard=safeguard<line_sep>self.lub=lub<line_sep>self.lr=lr<line_sep>self.img_size=32<if>'CIFAR'<in>args.dataset<else>224<line_sep>self.batch_size=batch_size<line_sep>self.rank_type=rank_type<line_sep>self.train_loader,self.val_loader,self.test_loader=get_dataloader(self.img_size dataset datapath batch_size args.no_val)<if_stmt>'CIFAR100'<in>dataset<block_start>num_classes=100<block_end><elif_stmt>'CIFAR10'<in>dataset<block_start>num_classes=10<block_end><elif_stmt>'ImageNet'<in>dataset<block_start>num_classes=1000<block_end><elif_stmt>'CUB200'<in>dataset<block_start>num_classes=200<block_end>self.model=model<line_sep>self.criterion=torch.nn.CrossEntropyLoss()<line_sep>self.pruner=eval(pruner)(self.model rank_type num_classes safeguard random=global_random_rank device=device)<line_sep>self.model.train()<block_end><def_stmt>learn_ranking_ea self name model_desc tau_hat long_ft target<block_start>name=name<line_sep>start_t=time.time()<line_sep>self.pruner.reset()<line_sep>self.pruner.model.eval()<line_sep>self.pruner.forward(torch.zeros((1 3 self.img_size self.img_size) device=self.device))<line_sep>original_flops=self.pruner.cur_flops<line_sep>original_size=self.pruner.cur_size<line_sep>print('Before Pruning, FLOPs: {:.3f}M, Size: {:.3f}M'.format(original_flops/1e6 original_size/1e6))<line_sep>mean_loss=[]<line_sep>num_layers=len(self.pruner.filter_ranks)<line_sep>minimum_loss=10<line_sep>best_perturbation=<none><line_sep>POPULATIONS=64<line_sep>SAMPLES=16<line_sep>GENERATIONS=400<line_sep>SCALE_SIGMA=1<line_sep>MUTATE_PERCENT=0.1<line_sep>index_queue=queue.Queue(POPULATIONS)<line_sep>population_loss=np.zeros(0)<line_sep>population_data=[]<line_sep>original_dist=self.pruner.filter_ranks.copy()<line_sep>original_dist_stat={}<for_stmt>k sorted(original_dist)<block_start>a=original_dist[k].cpu().numpy()<line_sep>original_dist_stat[k]={'mean':np.mean(a) 'std':np.std(a)}<block_end># Initialize Population <for_stmt>i range(GENERATIONS)<block_start>step_size=1-(float(i)/(GENERATIONS<times>1.25))<line_sep># Perturb distribution perturbation=[]<if_stmt>i<eq>POPULATIONS-1<block_start><for_stmt>k sorted(self.pruner.filter_ranks.keys())<block_start>perturbation.append((1 0))<block_end><block_end><elif_stmt>i<l>POPULATIONS-1<block_start><for_stmt>k sorted(self.pruner.filter_ranks.keys())<block_start>scale=np.exp(float(np.random.normal(0 SCALE_SIGMA)))<line_sep>shift=float(np.random.normal(0 original_dist_stat[k]['std']))<line_sep>perturbation.append((scale shift))<block_end><block_end><else_stmt><block_start>mean_loss.append(np.mean(population_loss))<line_sep>sampled_idx=np.random.choice(POPULATIONS SAMPLES)<line_sep>sampled_loss=population_loss[sampled_idx]<line_sep>winner_idx_=np.argmin(sampled_loss)<line_sep>winner_idx=sampled_idx[winner_idx_]<line_sep>oldest_index=index_queue.get()<line_sep># Mutate winner base=population_data[winner_idx]<line_sep># Perturb distribution mnum=int(MUTATE_PERCENT<times>len(self.pruner.filter_ranks))<line_sep>mutate_candidate=np.random.choice(len(self.pruner.filter_ranks) mnum)<for_stmt>k sorted(self.pruner.filter_ranks.keys())<block_start>scale=1<line_sep>shift=0<if_stmt>k<in>mutate_candidate<block_start>scale=np.exp(float(np.random.normal(0 SCALE_SIGMA<times>step_size)))<line_sep>shift=float(np.random.normal(0 original_dist_stat[k]['std']))<block_end>perturbation.append((scale<times>base[k][0] shift+base[k][1]))<block_end><block_end># Given affine transformations, rank and prune self.pruner.pruning_with_transformations(original_dist perturbation target)<line_sep># Re-measure the pruned model in terms of FLOPs and size self.pruner.reset()<line_sep>self.pruner.model.eval()<line_sep>self.pruner.forward(torch.zeros((1 3 self.img_size self.img_size) device=self.device))<line_sep>cur_flops=self.pruner.cur_flops<line_sep>cur_size=self.pruner.cur_size<line_sep>self.pruner.model=self.pruner.model.to(self.device)<line_sep>print('Density: {:.3f}% ({:.3f}M/{:.3f}M) | FLOPs: {:.3f}% ({:.3f}M/{:.3f}M)'.format(float(cur_size)/original_size<times>100 cur_size/1e6 original_size/1e6 float(cur_flops)/original_flops<times>100 cur_flops/1e6 original_flops/1e6))<line_sep>print('Fine tuning to recover from pruning iteration.')<line_sep>optimizer=optim.SGD(self.pruner.model.parameters() lr=self.lr momentum=0.9 weight_decay=5e-4)<if_stmt>tau_hat<g>0<block_start>train(self.model self.train_loader self.val_loader optimizer epochs=1 steps=tau_hat run_test=<false> device=self.device)<block_end>acc,loss=test(self.model self.val_loader device=self.device get_loss=<true>)<if_stmt>np.mean(loss)<l>minimum_loss<block_start>minimum_loss=np.mean(loss)<line_sep>best_perturbation=perturbation<block_end><if_stmt>i<l>POPULATIONS<block_start>index_queue.put(i)<line_sep>population_data.append(perturbation)<line_sep>population_loss=np.append(population_loss [np.mean(loss)])<block_end><else_stmt><block_start>index_queue.put(oldest_index)<line_sep>population_data[oldest_index]=perturbation<line_sep>population_loss[oldest_index]=np.mean(loss)<block_end># Restore the model back to origin model=torch.load(model_desc)<if_stmt>isinstance(model nn.DataParallel)<block_start>model=model.module<block_end>model.eval()<line_sep>model=model.to(self.device)<line_sep>self.pruner.model=model<line_sep>self.model=model<line_sep>self.pruner.reset()<line_sep>self.pruner.model.eval()<line_sep>self.pruner.forward(torch.zeros((1 3 self.img_size self.img_size) device=self.device))<line_sep>print('Generation {}, Step: {:.2f}, Min Loss: {:.3f}'.format(i step_size np.min(population_loss)))<block_end>total_t=time.time()-start_t<line_sep>print('Finished. Use {:.2f} hours. Minimum Loss: {:.3f}'.format(float(total_t)/3600 minimum_loss))<if_stmt><not>os.path.exists('./log')<block_start>os.makedirs('./log')<block_end>np.savetxt(os.path.join('./log' '{}_ea_loss.txt'.format(name)) np.array(mean_loss))<line_sep>np.savetxt(os.path.join('./log' '{}_ea_min.data'.format(name)) best_perturbation)<line_sep># Use the best affine transformation to obtain the resulting model self.pruner.pruning_with_transformations(original_dist best_perturbation target)<if_stmt><not>os.path.exists('./ckpt')<block_start>os.makedirs('./ckpt')<block_end>torch.save(self.pruner.model os.path.join('ckpt' '{}_bestarch_init.pt'.format(name)))<block_end><def_stmt>prune self name model_name long_ft target=-1<block_start>test_acc=[]<line_sep>b4ft_test_acc=[]<line_sep>density=[]<line_sep>flops=[]<line_sep># Get the accuracy before pruning acc=test(self.model self.test_loader device=self.device)<line_sep>test_acc.append(acc)<line_sep>b4ft_test_acc.append(acc)<line_sep>self.pruner.reset()<line_sep>self.model.eval()<line_sep>self.pruner.forward(torch.zeros((1 3 self.img_size self.img_size) device=self.device))<line_sep>b4prune_size=self.pruner.cur_size<line_sep>b4prune_flops=self.pruner.cur_flops<line_sep>density.append(self.pruner.cur_size)<line_sep>flops.append(self.pruner.cur_flops)<line_sep>print('Before Pruning, Acc: {:.2f}%, FLOPs: {:.3f}M, Size: {:.3f}M'.format(acc b4prune_flops/1e6 b4prune_size/1e6))<line_sep># If there is a learned affine transformation, load it. <if_stmt>self.lub<ne>''<block_start>perturbation=np.loadtxt(self.lub)<block_end><else_stmt><block_start>perturbation=np.array([[1. 0.]<for>_ range(len(self.pruner.filter_ranks))])<block_end>self.pruner.pruning_with_transformations(self.pruner.filter_ranks perturbation target)<line_sep>self.pruner.reset()<line_sep>self.model.eval()<line_sep>self.pruner.forward(torch.zeros((1 3 self.img_size self.img_size) device=self.device))<line_sep>cur_flops=self.pruner.cur_flops<line_sep>cur_size=self.pruner.cur_size<line_sep>density.append(cur_size)<line_sep>flops.append(cur_flops)<line_sep>print('Density: {:.3f}% ({:.3f}M/{:.3f}M) | FLOPs: {:.3f}% ({:.3f}M/{:.3f}M)'.format(cur_size/b4prune_size<times>100 cur_size/1e6 b4prune_size/1e6 cur_flops/b4prune_flops<times>100 cur_flops/1e6 b4prune_flops/1e6))<line_sep>print('Fine tuning to recover from pruning iteration.')<if_stmt><not>os.path.exists('./ckpt')<block_start>os.makedirs('./ckpt')<block_end>print('Saving untrained pruned model...')<line_sep>torch.save(self.pruner.model os.path.join('ckpt' '{}_init.t7'.format(name)))<line_sep>acc=test(self.model self.test_loader device=self.device)<line_sep>b4ft_test_acc.append(acc)<if_stmt><not>os.path.exists('./log')<block_start>os.makedirs('./log')<block_end>print('Finished. Going to fine tune the model a bit more')<if_stmt>long_ft<g>0<block_start>optimizer=optim.SGD(self.model.parameters() lr=self.lr momentum=0.9 weight_decay=5e-4 nesterov=<true>)<line_sep>#scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, long_ft) scheduler=optim.lr_scheduler.MultiStepLR(optimizer [int(long_ft<times>0.3) int(long_ft<times>0.6) int(long_ft<times>0.8)] gamma=0.2)<if_stmt>args.no_val<block_start>train(self.model self.train_loader self.test_loader optimizer epochs=long_ft scheduler=scheduler device=self.device name=name)<block_end><else_stmt><block_start>train(self.model self.train_loader self.val_loader optimizer epochs=long_ft scheduler=scheduler device=self.device name=name)<block_end>acc=test(self.model self.test_loader device=self.device)<line_sep>test_acc.append(acc)<block_end><else_stmt><block_start>acc=test(self.model self.test_loader device=self.device)<line_sep>test_acc.append(acc)<block_end>log=np.stack([np.array(b4ft_test_acc) np.array(test_acc) np.array(density) np.array(flops)] axis=1)<line_sep>np.savetxt(os.path.join('./log' '{}_test_acc.txt'.format(name)) log)<line_sep>print('Summary')<line_sep>print('Before Pruning- Accuracy: {:.3f}, Cost: {:.3f}M'.format(test_acc[0] b4prune_flops/1e6))<line_sep>print('After Pruning- Accuracy: {:.3f}, Cost: {:.3f}M'.format(test_acc[-1] cur_flops/1e6))<block_end><block_end><def_stmt>get_args <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--name" type=str default='pruned_mbnetv2' help='Name for the experiments, the resulting model and logs will use this')<line_sep>parser.add_argument("--datapath" type=str default='./data' help='Path toward the dataset that is used for this experiment')<line_sep>parser.add_argument("--dataset" type=str default='torchvision.datasets.CIFAR10' help='The class name of the dataset that is used, please find available classes under the dataset folder')<line_sep>parser.add_argument("--model" type=str default='./ckpt/resnet56_cifar10.t7' help='The pre-trained model that pruning starts from')<line_sep>parser.add_argument("--pruner" type=str default='FilterPrunerResNet' help='Different networks require different pruner implementations')<line_sep>parser.add_argument("--rank_type" type=str default='l2_weight' help='The ranking criteria for filter pruning')<line_sep>parser.add_argument("--lub" type=str default='' help='The affine transformations')<line_sep>parser.add_argument("--global_random_rank" action='store_true' default=<false> help='When this is specified, none of the rank_type matters, it will randomly prune the filters')<line_sep>parser.add_argument("--tau_hat" type=int default=0 help='The number of updates before evaluating for fitness (used in EA).')<line_sep>parser.add_argument("--long_ft" type=int default=60 help='It specifies how many epochs to fine-tune the network once the pruning is done')<line_sep>parser.add_argument("--prune_away" type=float default=90 help='How many percentage of constraints should be pruned away. E.g., 50 means 50% of FLOPs will be pruned away')<line_sep>parser.add_argument("--safeguard" type=float default=0 help='A floating point number that represent at least how many percentage of the original number of channel should be preserved. E.g., 0.10 means no matter what ranking, each layer should have at least 10% of the number of original channels.')<line_sep>parser.add_argument("--batch_size" type=int default=32 help='Batch size for training.')<line_sep>parser.add_argument("--min_lub" action='store_true' default=<false> help='Use Evolutionary Algorithm to solve latent variable for minimizing Lipschitz upper bound')<line_sep>parser.add_argument("--uniform_pruning" action='store_true' default=<false> help='Use Evolutionary Algorithm to solve latent variable for minimizing Lipschitz upper bound')<line_sep>parser.add_argument("--no_val" action='store_true' default=<false> help='Use full dataset to train (use to compare with prior art in CIFAR-10)')<line_sep>parser.add_argument("--cpu" action='store_true' default=<false> help='Use CPU')<line_sep>parser.add_argument("--lr" type=float default=0.001 help='The learning rate for fine-tuning')<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><if_stmt>__name__<eq>'__main__'<block_start>args=get_args()<line_sep>print(args)<line_sep>print('Pruning {}'.format(args.name))<line_sep>img_size=32<line_sep>device='cpu'<if>args.cpu<else>'cuda'<line_sep>prune_till=-1<line_sep>prune_away=args.prune_away<line_sep>model=torch.load(args.model)<if_stmt>isinstance(model nn.DataParallel)<block_start>model=model.module<block_end>model=model.to(device)<line_sep>legr=LeGR(args.dataset args.datapath model args.pruner args.rank_type args.batch_size args.lr safeguard=args.safeguard global_random_rank=args.global_random_rank lub=args.lub device=device)<if_stmt>prune_away<g>0<block_start>dummy_size=32<if>'CIFAR'<in>args.dataset<else>224<line_sep>legr.pruner.reset()<line_sep>legr.model.eval()<line_sep>legr.pruner.forward(torch.zeros((1 3 dummy_size dummy_size) device=device))<line_sep>b4prune_flops=legr.pruner.cur_flops<line_sep>prune_till=b4prune_flops<times>(1-(prune_away)/100.)<line_sep>print('Pruned until {:.3f}M'.format(prune_till/1000000.))<if_stmt>args.uniform_pruning<block_start>ratio=legr.pruner.get_uniform_ratio(prune_till)<line_sep>legr.pruner.safeguard=ratio<line_sep>prune_away=99<block_end><block_end><if_stmt>args.min_lub<block_start>legr.learn_ranking_ea(args.name args.model args.tau_hat args.long_ft (1-(prune_away)/100.))<block_end><else_stmt><block_start>legr.prune(args.name args.model args.long_ft (1-(prune_away)/100.))<block_end><block_end>
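# Illustrative invocation (hypothetical run; the script filename is an assumption, but the flags are exactly the ones defined in get_args above): # python legr.py --name pruned_r56 --dataset torchvision.datasets.CIFAR10 --datapath ./data --model ./ckpt/resnet56_cifar10.t7 --pruner FilterPrunerResNet --prune_away 50 --long_ft 60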
<def_stmt>find_missing_number c<block_start># upper end of the expected range upper=max(c)<line_sep># when the list starts at 1, treat 0 as part of the expected range as well lower=min(c+[0])<if>min(c)<eq>1<else>min(c)<line_sep># whatever lies in range(lower, upper) but never appears in the list is missing missing=set(range(lower upper))-set(c)<line_sep><return>list(missing)[0]<if>missing<ne>set()<else><none><block_end># expected outputs: 0 (zero counts as expected because the list starts at 1), 1, 6 print(find_missing_number([1 3 2 4]))<line_sep>print(find_missing_number([0 2 3 4 5]))<line_sep>print(find_missing_number([9 7 5 8]))<line_sep>
<import_from_stmt>server jobs utils<import_from_stmt>server.constants STAFF_ROLES<import_from_stmt>server.models Assignment GradingTask User<line_sep>@jobs.background_job<def_stmt>assign_grading_queues assignment_id staff kind<block_start>logger=jobs.get_job_logger()<line_sep>cid=jobs.get_current_job().course_id<line_sep>assign=Assignment.query.filter_by(id=assignment_id course_id=cid).one()<line_sep>selected_users=[]<for_stmt>hash_id staff<block_start>user=User.get_by_id(utils.decode_id(hash_id))<if_stmt>user<and>user.is_enrolled(cid roles=STAFF_ROLES)<block_start>selected_users.append(user)<block_end><block_end># Available backups data=assign.course_submissions()<line_sep>backups=set(b['backup']['id']<for>b data<if>b['backup'])<line_sep>tasks=GradingTask.create_staff_tasks(backups selected_users assignment_id cid kind)<line_sep>logger.info("{} created!".format(tasks))<block_end>
# This configuration is designed to be run as the second # in a series of cmsRun processes. The process it configures # will read a file in streamer format and produce two root # files. # For later event selection tests these paths are run: # path p1 1:25 pass # path p2 pass 51:60 # Checks the path names returned by the TriggerNames # service. # Multiple products are put in the event for use # in subsequent processes. # Two output files are created, one contains some # fake raw data, the other contains some fake # HLTDebug data (actually just dummy products containing # an int, just for test purposes) <import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("PROD")<line_sep>process.load("FWCore.MessageService.MessageLogger_cfi")<line_sep>process.MessageLogger.cerr.FwkReport.reportEvery=1000<import_stmt>FWCore.Framework.test.cmsExceptionsFatalOption_cff<line_sep>process.options=cms.untracked.PSet(# wantSummary = cms.untracked.bool(True), Rethrow=FWCore.Framework.test.cmsExceptionsFatalOption_cff.Rethrow)<line_sep>process.source=cms.Source("NewEventStreamFileReader" fileNames=cms.untracked.vstring('file:testSeriesOfProcessesHLT.dat'))<line_sep>process.f1=cms.EDFilter("TestFilterModule" acceptValue=cms.untracked.int32(25) onlyOne=cms.untracked.bool(<false>))<line_sep>process.f2a=cms.EDFilter("TestFilterModule" acceptValue=cms.untracked.int32(50) onlyOne=cms.untracked.bool(<false>))<line_sep>process.f2b=cms.EDFilter("TestFilterModule" acceptValue=cms.untracked.int32(10) onlyOne=cms.untracked.bool(<false>))<line_sep>process.a=cms.EDAnalyzer("TestTriggerNames" trigPathsPrevious=cms.untracked.vstring('p01' 'p02' 'p03' 'p04') streamerSource=cms.untracked.bool(<true>) trigPaths=cms.untracked.vstring('p1' 'p2') dumpPSetRegistry=cms.untracked.bool(<false>))<line_sep># This puts products in the lumis and runs. One failure # mode of the maxLuminosityBlock parameter is tested by their # mere existence. process.makeRunLumiProducts=cms.EDProducer("ThingWithMergeProducer")<line_sep># In the next process we want to test input from a secondary input # file so we split the products over 2 output files. process.out1=cms.OutputModule("PoolOutputModule" fileName=cms.untracked.string('testSeriesOfProcessesPROD1.root') outputCommands=cms.untracked.vstring("drop *" "keep *_fakeRaw_*_*"))<line_sep>process.out2=cms.OutputModule("PoolOutputModule" fileName=cms.untracked.string('testSeriesOfProcessesPROD2.root') outputCommands=cms.untracked.vstring("keep *" "drop *_fakeRaw_*_*"))<line_sep>process.pathanalysis=cms.EDAnalyzer("PathAnalyzer")<line_sep>process.p1=cms.Path(process.f1<times>process.makeRunLumiProducts)<line_sep>process.p2=cms.Path(~process.f2a<times>process.f2b)<line_sep>process.e=cms.EndPath(process.a<times>process.pathanalysis<times>process.out1<times>process.out2)<line_sep>
## # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## <import_stmt>random<import_stmt>pycylon<as>cn<import_from_stmt>pycylon DataFrame CylonEnv<import_from_stmt>pycylon.net MPIConfig<line_sep>df1=DataFrame([random.sample(range(10 100) 5) random.sample(range(10 100) 5)])<line_sep>df2=DataFrame([random.sample(range(10 100) 5) random.sample(range(10 100) 5)])<line_sep>df3=DataFrame([random.sample(range(10 100) 10) random.sample(range(10 100) 10)])<line_sep># local concat df4=cn.concat(axis=0 objs=[df1 df2 df3])<line_sep>print("Local concat axis0")<line_sep>print(df4)<line_sep>df2.rename(['00' '11'])<line_sep>df3.rename(['000' '111'])<line_sep>df4=cn.concat(axis=1 objs=[df1 df2 df3])<line_sep>print("Local concat axis1")<line_sep>print(df4)<line_sep># distributed concat env=CylonEnv(config=MPIConfig())<line_sep>df1=DataFrame([random.sample(range(10<times>env.rank 15<times>(env.rank+1)) 5) random.sample(range(10<times>env.rank 15<times>(env.rank+1)) 5)])<line_sep>df2=DataFrame([random.sample(range(10<times>env.rank 15<times>(env.rank+1)) 5) random.sample(range(10<times>env.rank 15<times>(env.rank+1)) 5)])<line_sep>df3=DataFrame([random.sample(range(10<times>env.rank 15<times>(env.rank+1)) 10) random.sample(range(10<times>env.rank 15<times>(env.rank+1)) 10)])<line_sep>print("Distributed concat axis0" env.rank)<line_sep>df4=cn.concat(axis=0 objs=[df1 df2 df3] env=env)<line_sep>print(df4)<line_sep>df2.rename(['00' '11'])<line_sep>df3.rename(['000' '111'])<line_sep>df4=cn.concat(axis=1 objs=[df1 df2 df3] env=env)<line_sep>print("Distributed concat axis1" env.rank)<line_sep>print(df4)<line_sep>env.finalize()<line_sep>
<import_stmt>functools<import_stmt>itertools<import_from_stmt>argparse SUPPRESS ArgumentDefaultsHelpFormatter ArgumentParser Namespace<import_from_stmt>typing Any Callable List NamedTuple Optional Tuple cast<def_stmt>make_prefixes desc:str<arrow>List[str]<block_start>parts=desc.split("|")<line_sep>ret=[parts[0]]<for_stmt>part parts[1:]<block_start>ret.append(ret[-1]+part)<block_end><return>ret<block_end><def_stmt>generate_aliases spec:str<arrow>Tuple[str List[str]]<block_start>""" Take the given string and split it by spaces. For each word, split it by pipe characters and compute the result of joining each prefix of that list. Return a big list containing all the results, except that the result of joining the whole first word is pulled out. "c|heck|out co" => ["c|heck|out", "co"] => [["c", "heck", "out"], ["co"]] => [["c", "check", "checkout"], ["co"]] => "checkout", ["c", "check", "co"] """<line_sep>prefixes=[make_prefixes(s)<for>s spec.split()]<line_sep>main=prefixes[0].pop()<line_sep><return>main list(itertools.chain.from_iterable(prefixes))<block_end># Classes used to represent the structure of an argument parser setup; these # are turned into actual `argparse` objects by `add_args`. <class_stmt>Cmd<block_start>"""Describes a subcommand."""<def_stmt>__init__ self name:str func:Optional[Callable] help_str:str subs:List[Any] is_default:bool=<false> <arrow><none><block_start>""" `subs` is a list containing `Cmd`, `Arg`, and `Group` that describes the arguments, subcommands, and mutually exclusive argument groups for this command. """<line_sep>self.name=name<line_sep>self.help_str=help_str<line_sep>self.func=func<if_stmt>self.func# Force the help string onto the actual function for later. This # can be used to print the help string <block_start>self.func.__name__=help_str<block_end>self.subs=subs<line_sep>self.is_default=is_default<block_end><block_end><class_stmt>Arg<block_start>""" Describes an argument. Arguments to the constructor are passed to `add_argument`. """<def_stmt>__init__ self *args:Any completer:Optional[Callable]=<none> **kwargs:Any<arrow><none><block_start>self.args=args<line_sep>self.kwargs=kwargs<line_sep>self.completer=completer<block_end><block_end><class_stmt>Group<block_start>"""Describes a mutually exclusive group of options."""<def_stmt>__init__ self *options:Arg **kwargs:Any<arrow><none><block_start>self.options=options<line_sep>self.kwargs=kwargs<block_end><block_end><class_stmt>ArgGroup<block_start>""" Describes a named conceptual group of options. Arguments are passed to `add_argument_group`. """<def_stmt>__init__ self title:Optional[str]=<none> description:Optional[str]=<none> child_args:Optional[List[Arg]]=<none> <arrow><none><block_start>self.title=title<line_sep>self.description=description<line_sep>self.child_args=child_args<or>[]<block_end><block_end><class_stmt>BoolOptArg(NamedTuple)<block_start>"""Describes a boolean --foo / --no-foo flag pair."""<line_sep>true_name:str<line_sep>false_name:str<line_sep>dest:str<line_sep>default:bool=<false><line_sep>true_help:Optional[str]=<none><line_sep>false_help:Optional[str]=<none><block_end><def_stmt>wrap_func parser:ArgumentParser func:Callable<arrow>Callable<block_start>@functools.wraps(func)<def_stmt>wrapper args:Namespace<arrow>Any<block_start>args.func=func<line_sep><return>func(parser.parse_args([] args))<block_end><return>wrapper<block_end><def_stmt>help_func parser:ArgumentParser<arrow>Callable<block_start>""" Return a function that prints help for the given parser. 
Using this doesn't exit during the call to `parse_args` itself, which would be ideal, but only when the function from the `parse_args` result is called. It looks about the same as long as you do the second right after the first, at least. """<def_stmt>inner_func args:Namespace<arrow>Any<block_start>parser.print_help()<block_end><return>inner_func<block_end><def_stmt>add_args parser:ArgumentParser description:List[Any] depth:int=0<arrow><none><block_start>""" Populate the given parser with arguments, as specified by the description. The description is a list of Arg, Cmd, and Group objects. """<line_sep>subparsers=<none><line_sep>help_parser=<none><def_stmt>description_sort_key desc:Any<arrow>str<block_start><if_stmt>isinstance(desc Cmd)<block_start><return>desc.name<block_end># `sorted` is stable, so we shouldn't change the relative # positioning of non-Cmd arg descriptions. <return>""<block_end># Sort descriptions alphabetically by name before passing them to # argparse. This ensures that `help` output is sorted # alphabetically. description=sorted(description key=description_sort_key)<for_stmt>thing description<block_start><if_stmt>isinstance(thing Cmd)<block_start><if_stmt>subparsers<is><none><block_start>metavar="sub"<times>depth+"command"<line_sep>subparsers=parser.add_subparsers(metavar=metavar)<line_sep># If there are any subcommands at all, also add a `help` # subcommand. help_parser=subparsers.add_parser("help" help="show help for this command")<line_sep>help_parser.set_defaults(func=help_func(parser))<block_end>main_name,aliases=generate_aliases(thing.name)<line_sep>subparser_kwargs={"aliases":aliases "formatter_class":ArgumentDefaultsHelpFormatter }<if_stmt>thing.help_str<ne>SUPPRESS<block_start>subparser_kwargs["help"]=thing.help_str<block_end>subparser=subparsers.add_parser(main_name **subparser_kwargs)<line_sep>subparser.set_defaults(func=thing.func)<line_sep>subparser.set_defaults(**{("_"+"sub"<times>depth+"command"):thing.name})<line_sep># If this is the default subcommand, make calling the parent with # no subcommand behave the same as calling this subcommand with no # arguments. <if_stmt>thing.is_default<block_start>thing.func=cast(Callable thing.func)<line_sep>parser.set_defaults(func=wrap_func(subparser thing.func))<block_end>add_args(subparser thing.subs depth+1)<block_end><elif_stmt>isinstance(thing Arg)<block_start>arg=parser.add_argument(*thing.args **thing.kwargs)<line_sep>arg.completer=thing.completer<block_end># type: ignore <elif_stmt>isinstance(thing Group)<block_start>group=parser.add_mutually_exclusive_group(**thing.kwargs)<for_stmt>option thing.options<block_start>group.add_argument(*option.args **option.kwargs)<block_end><block_end><elif_stmt>isinstance(thing ArgGroup)<block_start>arg_group=parser.add_argument_group(thing.title thing.description)<for_stmt>child_arg thing.child_args<block_start>arg_group.add_argument(*child_arg.args **child_arg.kwargs)<block_end><block_end><elif_stmt>isinstance(thing BoolOptArg)<block_start>parser.add_argument(thing.true_name dest=thing.dest action="store_true" help=thing.true_help)<line_sep>parser.add_argument(thing.false_name dest=thing.dest action="store_false" help=thing.false_help)<line_sep>parser.set_defaults(**{thing.dest:thing.default})<block_end><block_end># If there are any subcommands but none claimed the default action, make # the default print help. <if_stmt>subparsers<is><not><none><and>parser.get_default("func")<is><none><block_start>parser.set_defaults(func=help_func(parser))<block_end><block_end>
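# Illustrative sketch (hypothetical command tree, not from the original module): generate_aliases("l|ist ls") yields the main name "list" with aliases "l" and "ls", and add_args wires the whole tree into a plain ArgumentParser. <def_stmt>do_list args<block_start>print("listing")<block_end>example_parser=ArgumentParser(prog="tool")<line_sep>add_args(example_parser [Cmd("l|ist ls" do_list "list things" [Arg("--all" action="store_true" help="include hidden entries")])])<line_sep># parsed = example_parser.parse_args(["ls", "--all"]); parsed.func(parsed)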
# -*- coding: utf-8 -*- <import_from_stmt>datetime datetime<import_from_stmt>dj.choices Choices<import_from_stmt>django.db models<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>ralph.assets.models.base BaseObject<import_from_stmt>ralph.lib.mixins.models AdminAbsoluteUrlMixin TaggableMixin TimeStampMixin <import_from_stmt>ralph.lib.permissions PermByFieldMixin<def_stmt>any_exceeded vulnerabilities<block_start><return>any([v.is_deadline_exceeded<for>v vulnerabilities])<block_end><class_stmt>ScanStatus(Choices)<block_start>_=Choices.Choice<line_sep>ok=_("ok")<line_sep>fail=_("fail")<line_sep>error=_("error")<block_end><class_stmt>Risk(Choices)<block_start>_=Choices.Choice<line_sep>low=_("low")<line_sep>medium=_("medium")<line_sep>high=_("high")<block_end><class_stmt>Vulnerability(AdminAbsoluteUrlMixin PermByFieldMixin TimeStampMixin TaggableMixin models.Model )<block_start>_allow_in_dashboard=<true><line_sep>name=models.CharField(verbose_name=_("name") max_length=1024 unique=<false>)<line_sep>display_name=models.CharField(verbose_name=_("display name") max_length=1024)<line_sep>patch_deadline=models.DateTimeField(null=<true> blank=<true>)<line_sep>risk=models.PositiveIntegerField(choices=Risk() null=<true> blank=<true>)<line_sep>external_vulnerability_id=models.IntegerField(unique=<true> # id assigned by the external system null=<true> blank=<true> help_text=_('Id of vulnerability from external system') )<line_sep>@property<def_stmt>is_deadline_exceeded self<block_start><return>self.patch_deadline<l>datetime.now()<block_end><def_stmt>__str__ self<block_start>deadline=(self.patch_deadline.strftime('%Y-%m-%d')<if>self.patch_deadline<else>'-')<line_sep><return>"{} ({})".format(self.name deadline)<block_end><block_end><class_stmt>SecurityScan(AdminAbsoluteUrlMixin PermByFieldMixin TimeStampMixin TaggableMixin models.Model )<block_start>_allow_in_dashboard=<true><line_sep>last_scan_date=models.DateTimeField()<line_sep>scan_status=models.PositiveIntegerField(choices=ScanStatus())<line_sep>next_scan_date=models.DateTimeField()<line_sep>details_url=models.URLField(max_length=255 blank=<true>)<line_sep>rescan_url=models.URLField(blank=<true> verbose_name=_('Rescan url'))<line_sep>base_object=models.OneToOneField(BaseObject on_delete=models.CASCADE )<line_sep>vulnerabilities=models.ManyToManyField(Vulnerability blank=<true>)<line_sep># This is a quirk field: it is updated manually (for now via the API), # because it's hard to handle automatically (its value is computed from # the M2M field, and M2M signals are complicated) is_patched=models.BooleanField(default=<false>)<line_sep>@property<def_stmt>is_ok self<block_start><return>self.scan_status<eq>ScanStatus.ok.id<block_end><def_stmt>update_is_patched self<block_start>"""Updates the `is_patched` field based on the related vulnerabilities"""<line_sep>self.is_patched=<not>any_exceeded(self.vulnerabilities.all())<block_end><def_stmt>__str__ self<block_start><return>"{} {} ({})".format(self.last_scan_date.strftime('%Y-%m-%d') ScanStatus.from_id(self.scan_status).desc self.base_object.content_type )<block_end><block_end>
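# A hedged usage sketch (not part of the original module) showing how
# `update_is_patched` derives its value from the related vulnerabilities;
# `scan` is assumed to be an existing SecurityScan instance.
#
# from datetime import timedelta
#
# overdue = Vulnerability.objects.create(
#     name="example-vuln", display_name="example vuln",
#     patch_deadline=datetime.now() - timedelta(days=1),  # deadline exceeded
# )
# scan.vulnerabilities.add(overdue)
# scan.update_is_patched()  # any_exceeded(...) is True, so is_patched -> False
# scan.save()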
<class_stmt>TrieNode<block_start><def_stmt>__init__ self<block_start>self.flag=<false><line_sep>self.children={}<block_end><block_end><class_stmt>Trie(object)<block_start><def_stmt>__init__ self<block_start>""" Initialize your data structure here. """<line_sep>self.root=TrieNode()<block_end><def_stmt>insert self word<block_start>""" Inserts a word into the trie. :type word: str :rtype: None """<line_sep>current=self.root<for_stmt>character word<block_start><if_stmt>character<not><in>current.children<block_start>current.children[character]=TrieNode()<block_end>current=current.children[character]<block_end>current.flag=<true><block_end><def_stmt>search self word<block_start>""" Returns if the word is in the trie. :type word: str :rtype: bool """<line_sep>result,node=self.childSearch(word)<if_stmt>result<block_start><return>node.flag<block_end><return><false><block_end><def_stmt>startsWith self prefix<block_start>""" Returns if there is any word in the trie that starts with the given prefix. :type prefix: str :rtype: bool """<line_sep>result,node=self.childSearch(prefix)<line_sep><return>result<block_end><def_stmt>childSearch self word<block_start>current=self.root<for_stmt>character word<block_start><if_stmt>character<in>current.children<block_start>current=current.children[character]<block_end><else_stmt><block_start><return><false> <none><block_end><block_end><return><true> current<block_end><block_end># Your Trie object will be instantiated and called as such: # obj = Trie() # obj.insert(word) # param_2 = obj.search(word) # param_3 = obj.startsWith(prefix)
<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<line_sep># Smoothed-step (tanh) temperature profile: T rises from T_star to T_base # across a transition layer spanning roughly x = H to x = H + 3*delta, with # midpoint at x = H + 1.5*delta. x=np.linspace(0 100 1000)<line_sep>H=50<line_sep>delta=2.5<line_sep>T_base=2<line_sep>T_star=1<line_sep>T=T_star+0.5<times>(T_base-T_star)<times>(1.0+np.tanh((x-H-1.5<times>delta)/(0.5<times>delta)))<line_sep>plt.plot(x T)<line_sep># Vertical guides mark the start (x = H) and end (x = H + 3*delta) of the # transition region. plt.plot([H H] [1 T_base])<line_sep>plt.plot([H+3<times>delta H+3<times>delta] [1 T_base])<line_sep>plt.xlim(H-2<times>delta H+4<times>delta)<line_sep>plt.savefig("profile2.png")<line_sep>
'''show_route.py IOS parsers for the following show commands: * show ip route * show ip route vrf <vrf> * show ipv6 route * show ipv6 route vrf <vrf> * show ip route <Hostname or A.B.C.D> * show ip route vrf <vrf> <Hostname or A.B.C.D> * show ipv6 route <Hostname or 2001:DB8:64:79::C:D> * show ipv6 route vrf <vrf> <Hostname or 2001:DB8:64:79::C:D> * show ipv6 route updated * show ipv6 route vrf <vrf> updated * show ip route summary * show ip route vrf <vrf> summary '''<import_from_stmt>genie.libs.parser.iosxe.show_routing ShowIpv6RouteUpdated<as>ShowIpv6RouteUpdated_iosxe ShowIpRouteSummary<as>ShowIpRouteSummary_iosxe ShowIpRouteDistributor<as>ShowIpRouteDistributor_iosxe ShowIpv6RouteDistributor<as>ShowIpv6RouteDistributor_iosxe <class_stmt>ShowIpRouteDistributor(ShowIpRouteDistributor_iosxe)<block_start>"""distributor class for show ip route"""<line_sep><pass><block_end><class_stmt>ShowIpv6RouteDistributor(ShowIpv6RouteDistributor_iosxe)<block_start>"""distributor class for show ipv6 route"""<line_sep><pass><block_end><class_stmt>ShowIpv6RouteUpdated(ShowIpv6RouteUpdated_iosxe)<block_start>"""Parser for : show ipv6 route updated show ipv6 route vrf <vrf> updated"""<line_sep><pass><block_end><class_stmt>ShowIpRouteSummary(ShowIpRouteSummary_iosxe)<block_start>"""Parser for : show ip route summary show ip route vrf <vrf> summary"""<line_sep><pass><block_end>
<import_from_stmt>.augmenters Elastic2D Elastic3D<line_sep>
# # Copyright 2018 Analytics Zoo Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>os<import_stmt>pytest<import_stmt>numpy<as>np<import_from_stmt>bigdl.dataset.base maybe_download<import_from_stmt>test.zoo.pipeline.utils.test_utils ZooTestCase<import_from_stmt>zoo.pipeline.inference InferenceModel<import_stmt>tarfile<line_sep>np.random.seed(1337)# for reproducibility resource_path=os.path.join(os.path.split(__file__)[0] "../../resources")<line_sep>property_path=os.path.join(os.path.split(__file__)[0] "../../../../../zoo/target/classes/app.properties")<line_sep>data_url="https://s3-ap-southeast-1.amazonaws.com"<with_stmt>open(property_path)<as>f<block_start><for_stmt>_ range(2)# skip the first two lines <block_start>next(f)<block_end><for_stmt>line f<block_start><if_stmt>"data-store-url"<in>line<block_start>line=line.strip()<line_sep>data_url=line.split("=")[1].replace("\\" "")<block_end><block_end><block_end><class_stmt>TestInferenceModel(ZooTestCase)<block_start><def_stmt>test_load_bigdl self<block_start>model=InferenceModel(3)<line_sep>model.load_bigdl(os.path.join(resource_path "models/bigdl/bigdl_lenet.model"))<line_sep>input_data=np.random.random([4 28 28 1])<line_sep>output_data=model.predict(input_data)<block_end><def_stmt>test_load_caffe self<block_start>model=InferenceModel(10)<line_sep>model.load_caffe(os.path.join(resource_path "models/caffe/test_persist.prototxt") os.path.join(resource_path "models/caffe/test_persist.caffemodel"))<line_sep>input_data=np.random.random([4 3 8 8])<line_sep>output_data=model.predict(input_data)<block_end><def_stmt>test_load_openvino self<block_start>local_path=self.create_temp_dir()<line_sep>model=InferenceModel(1)<line_sep>model_url=data_url+"/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.xml"<line_sep>weight_url=data_url+"/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.bin"<line_sep>model_path=maybe_download("resnet_v1_50.xml" local_path model_url)<line_sep>weight_path=maybe_download("resnet_v1_50.bin" local_path weight_url)<line_sep>model.load_openvino(model_path weight_path)<line_sep>input_data=np.random.random([4 1 224 224 3])<line_sep>model.predict(input_data)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>pytest.main([__file__])<block_end>
""" A mixin module for USB Human Interface Device instruments """<import_stmt>os<import_stmt>time<import_stmt>struct<import_from_stmt>typing Optional List Any<try_stmt><block_start><import_stmt>pywinusb.hid<as>hid<block_end><except_stmt>ImportError# We will raise a proper error when we attempt to instantiate a driver. # Raising an exception here will cause CI to fail under Linux <block_start>hid=<none><block_end><import_from_stmt>qcodes.instrument.base Instrument<class_stmt>USBHIDMixin(Instrument)<block_start>""" Args: instance_id: The id of the instrument we want to connect to. If there is only one instrument, then this argument is optional. If more than one instrument happen to be connected, use `enumerate_devices` method to query their IDs timeout: Specify a timeout for this instrument in seconds """<line_sep># The following class attributes should be set by subclasses vendor_id=0x0000<line_sep>product_id=0x0000<line_sep>@staticmethod<def_stmt>_check_hid_import <arrow><none><block_start><if_stmt>os.name<ne>'nt'<block_start><raise>ImportError("This driver only works on Windows.")<block_end><if_stmt>hid<is><none><block_start><raise>ImportError("pywinusb is not installed. Please install it by typing "<concat>"'pip install pywinusb' in a qcodes environment terminal")<block_end><block_end><def_stmt>__init__ self name:str instance_id:Optional[str]=<none> timeout:float=2 **kwargs:Any<block_start>self._check_hid_import()<line_sep>devs=hid.HidDeviceFilter(product_id=self.product_id vendor_id=self.vendor_id instance_id=instance_id).get_devices()<if_stmt>len(devs)<eq>0<block_start><raise>RuntimeError("No instruments found!")<block_end><elif_stmt>len(devs)<g>1<block_start><raise>RuntimeError("Multiple HID devices detected! Please supply "<concat>"a instance id")<block_end>self._device=devs[0]<line_sep>self._device.open()<line_sep>self._data_buffer:Optional[bytes]=<none><line_sep>self._device.set_raw_data_handler(self._handler)<line_sep>self._timeout=timeout<line_sep>self._tries_per_second=5<line_sep>super().__init__(name **kwargs)<block_end><def_stmt>_handler self data:bytes<arrow><none><block_start>self._data_buffer=data<block_end><def_stmt>_get_data_buffer self<arrow>Optional[bytes]<block_start>data=self._data_buffer<line_sep>self._data_buffer=<none><line_sep><return>data<block_end><def_stmt>_pack_string self cmd:str<arrow>bytes<block_start><raise>NotImplementedError("Please subclass")<block_end><def_stmt>_unpack_string self response:bytes<arrow>str<block_start><raise>NotImplementedError("Please subclass")<block_end><def_stmt>write_raw self cmd:str<arrow><none><block_start>""" Send a string command to the human interface device The given command is processed by `_pack_string` method to return a byte sequence that is going to be actually sent to the device. Subclasses must implement `_pack_string` method. Args: cmd: a command to send in a form of a string """<line_sep>data=self._pack_string(cmd)<line_sep>result=self._device.send_output_report(data)<if_stmt><not>result<block_start><raise>RuntimeError(f"Communication with device failed for command "<concat>f"{cmd}")<block_end><block_end><def_stmt>ask_raw self cmd:str<arrow>str<block_start>""" Send a string command to the human interface device and wait for a reply The given command is processed by `_pack_string` method to return a byte sequence that is going to be actually sent to the device. Subclasses must implement `_pack_string` method. The byte sequence of the reply is processed by `_unpack_string` method, and the resulting string is returned. 
Subclasses must implement the `_unpack_string` method. Args: cmd: a command to send in the form of a string """<line_sep>self.write_raw(cmd)<line_sep>number_of_tries=int(self._tries_per_second<times>self._timeout)<line_sep>response=<none><for_stmt>_ range(number_of_tries)<block_start>time.sleep(1/self._tries_per_second)<line_sep>response=self._get_data_buffer()<if_stmt>response<is><not><none><block_start><break><block_end><block_end><if_stmt>response<is><none><block_start><raise>TimeoutError(f"Timed out for command {cmd}")<block_end><return>self._unpack_string(response)<block_end><def_stmt>close self<arrow><none><block_start>self._device.close()<block_end>@classmethod<def_stmt>enumerate_devices cls<arrow>List[str]<block_start>""" This method returns the 'instance_id's of all connected devices with the given product and vendor IDs. """<line_sep>cls._check_hid_import()<line_sep>devs=hid.HidDeviceFilter(product_id=cls.product_id vendor_id=cls.vendor_id).get_devices()<line_sep><return>[dev.instance_id<for>dev devs]<block_end><block_end><class_stmt>MiniCircuitsHIDMixin(USBHIDMixin)<block_start>""" The specific implementation for MiniCircuits human interface devices. This implementation allows using the `write`/`ask` methods of the instrument instance to send SCPI commands to MiniCircuits instruments over a USB HID connection. Args: name: instrument name instance_id: The id of the instrument we want to connect to. If there is only one instrument, this argument is optional. If there is more than one instrument, use the class method `enumerate_devices` to query their IDs timeout: Specify a timeout for this instrument in seconds """<def_stmt>__init__ self name:str instance_id:Optional[str]=<none> timeout:float=2 **kwargs:Any# USB interrupt code for sending SCPI commands <block_start>self._sending_scpi_cmds_code=1<line_sep>self._usb_endpoint=0<line_sep>self._end_of_message=b"\x00"<line_sep>self.packet_size=64<line_sep>super().__init__(name instance_id timeout **kwargs)<block_end><def_stmt>_pack_string self cmd:str<arrow>bytes<block_start>""" Pack a string to a binary format such that it can be sent to the HID. Args: cmd: a SCPI command to send """<line_sep>str_len=len(cmd)<line_sep># The "-1" compensates for the first byte in the packet, which is # always the USB interrupt code of the command (in this case, the code # telling the device that we are sending a SCPI command) pad_len=self.packet_size-str_len-1<if_stmt>pad_len<l>0<block_start><raise>ValueError(f"Length of data exceeds {self.packet_size} B")<block_end>packed_data=struct.pack(f"BB{str_len}s{pad_len}x" self._usb_endpoint self._sending_scpi_cmds_code cmd.encode("ascii"))<line_sep><return>packed_data<block_end><def_stmt>_unpack_string self response:bytes<arrow>str<block_start>""" Unpack data received from the instrument into a string Note that this method is not specific to SCPI-only responses. Args: response: a raw byte sequence response from the instrument """<line_sep>_,_,reply_data=struct.unpack(f"BB{self.packet_size-1}s" bytes(response))<line_sep>span=reply_data.find(self._end_of_message)<line_sep><return>reply_data[:span].decode("ascii")<block_end><block_end>
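# A small illustration (an assumption, not from the original source) of the
# packet layout produced by MiniCircuitsHIDMixin._pack_string: one report-id
# byte (`_usb_endpoint`), one command-code byte (`_sending_scpi_cmds_code`),
# the ASCII command, then zero padding. ":SN?" here stands in for a typical
# MiniCircuits serial-number query.
#
# import struct
# cmd = ":SN?"
# packed = struct.pack("BB4s59x", 0, 1, cmd.encode("ascii"))
# assert len(packed) == 65            # 1 report-id byte + 64 payload bytes
# assert packed[:6] == b"\x00\x01:SN?"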
<import_from_stmt>conans ConanFile tools<import_from_stmt>conans.errors ConanInvalidConfiguration<import_stmt>os<line_sep>required_conan_version=">=1.33.0"<class_stmt>TCSBankUconfigConan(ConanFile)<block_start>name="tcsbank-uconfig"<line_sep>description="Lightweight, header-only, C++17 configuration library"<line_sep>topics=("conan" "configuration" "env" "json")<line_sep>url="https://github.com/conan-io/conan-center-index"<line_sep>homepage="https://github.com/TinkoffCreditSystems/uconfig"<line_sep>license="Apache-2.0"<line_sep>generators="cmake" "cmake_find_package_multi"<line_sep>settings="os" "arch" "compiler" "build_type"<line_sep>options={"with_rapidjson":[<true> <false>] }<line_sep>default_options={"with_rapidjson":<true> }<line_sep>@property<def_stmt>_source_subfolder self<block_start><return>"source_subfolder"<block_end>@property<def_stmt>_build_subfolder self<block_start><return>"build_subfolder"<block_end><def_stmt>requirements self<block_start><if_stmt>self.options.with_rapidjson<block_start>self.requires("rapidjson/1.1.0")<block_end><block_end><def_stmt>validate self<block_start>compiler=str(self.settings.compiler)<line_sep>compiler_version=tools.Version(self.settings.compiler.version)<line_sep>min_req_cppstd="17"<if_stmt>self.settings.compiler.cppstd<block_start>tools.check_min_cppstd(self min_req_cppstd)<block_end><else_stmt><block_start>self.output.warn("%s recipe lacks information about the %s compiler"<concat>" standard version support."%(self.name compiler))<block_end>minimal_version={"Visual Studio":"16" "gcc":"7.3" "clang":"6.0" "apple-clang":"10.0" }<line_sep># For compilers not in the map we can only report the C++ requirement # and skip the version check <if_stmt>compiler<not><in>minimal_version<block_start>self.output.info("%s requires a compiler that supports at least C++%s"%(self.name min_req_cppstd))<line_sep><return><block_end><if_stmt>compiler_version<l>minimal_version[compiler]<block_start><raise>ConanInvalidConfiguration("%s requires a compiler that supports at least C++%s. %s %s is not supported."%(self.name min_req_cppstd compiler compiler_version))<block_end><block_end><def_stmt>source self<block_start>tools.get(**self.conan_data["sources"][self.version] destination=self._source_subfolder strip_root=<true>)<block_end><def_stmt>package self<block_start>self.copy("LICENSE" src=self._source_subfolder dst="licenses")<line_sep>self.copy("*.h" dst="include" src=os.path.join(self._source_subfolder "include"))<line_sep>self.copy("*.ipp" dst="include" src=os.path.join(self._source_subfolder "include"))<block_end><def_stmt>package_info self<block_start>self.cpp_info.names["pkg_config"]="uconfig"<line_sep>self.cpp_info.names["cmake_find_package"]="uconfig"<line_sep>self.cpp_info.names["cmake_find_package_multi"]="uconfig"<if_stmt>self.options.with_rapidjson<block_start>self.cpp_info.defines=["RAPIDJSON_HAS_STDSTRING=1"]<block_end><block_end><def_stmt>package_id self<block_start>self.info.header_only()<block_end><block_end>
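# A hedged usage note (not part of the recipe): with a Conan 1.x client this
# package would typically be built and tested locally with something like
#
#   conan create . 1.0.2@ -o tcsbank-uconfig:with_rapidjson=False
#
# where the version "1.0.2" is only an example. Because `package_id` calls
# `header_only()`, a single package id is shared across settings and options.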
"""Membership operators @see: https://www.w3schools.com/python/python_operators.asp Membership operators are used to test if a sequence is presented in an object. """<def_stmt>test_membership_operators <block_start>"""Membership operators"""<line_sep># Let's use the following fruit list to illustrate membership concept. fruit_list=["apple" "banana"]<line_sep># in # Returns True if a sequence with the specified value is present in the object. # Returns True because a sequence with the value "banana" is in the list <assert_stmt>"banana"<in>fruit_list<line_sep># not in # Returns True if a sequence with the specified value is not present in the object # Returns True because a sequence with the value "pineapple" is not in the list. <assert_stmt>"pineapple"<not><in>fruit_list<block_end>
# coding: utf-8 <import_from_stmt>os.path abspath dirname realpath<import_from_stmt>pathlib Path<import_stmt>pytest<import_stmt>yaml<import_from_stmt>pybatfish.client.session Session<line_sep>_THIS_DIR:Path=Path(abspath(dirname(realpath(__file__))))<line_sep>_DOC_DIR:Path=_THIS_DIR.parent<line_sep>_QUESTIONS_YAML:Path=_DOC_DIR/"nb_gen"/"questions.yaml"<line_sep>@pytest.fixture(scope="session")<def_stmt>session <block_start><return>Session()<block_end>@pytest.fixture(scope="session")<def_stmt>categories <block_start><return>yaml.safe_load(_QUESTIONS_YAML.open())<block_end>
<import_from_future_stmt> print_function<import_from_stmt>builtins object<import_from_stmt>builtins str<import_from_stmt>typing Dict<import_from_stmt>empire.server.common.module_models PydanticModule<class_stmt>Module(object)<block_start>@staticmethod<def_stmt>generate main_menu module:PydanticModule params:Dict obfuscate:bool=<false> obfuscation_command:str=""<block_start>script=''<if_stmt>params['Debug']<block_start>debug=params['Debug']<line_sep>script<augadd>"Debug = "+str(debug)+'\n'<block_end><if_stmt>params['HistoryCount']<block_start>search=params['HistoryCount']<line_sep>script<augadd>'HistoryCount = '+str(search)+'\n'<block_end>script<augadd>""" try: import subprocess import sys import os import time from os.path import expanduser # Get Home User home = str(expanduser("~")) sudo = True # Check for sudo privs, if true than set true process = subprocess.Popen('which sudo|wc -l', stdout=subprocess.PIPE, shell=True) result = process.communicate() result = result[0].strip() if str(result) != "1": print("[!] ERROR some shit requires (sudo) privileges!") sudo = False sys.exit() # Enum Hostname try: process = subprocess.Popen('hostname', stdout=subprocess.PIPE, shell=True) hostname = process.communicate() hostname = hostname[0].strip() print("[*] Hostname:") print((" - " + str(hostname.strip()))) except Exception as e: if Debug: print(("[!] Error enumerating hostname: " + str(e))) pass # Enum Software Package try: process = subprocess.Popen('sw_vers -productVersion', stdout=subprocess.PIPE, shell=True) swvers = process.communicate() swvers = swvers[0].strip() print("[*] MAC OS Package Level:") print((" - " + str(swvers.strip()))) except Exception as e: if Debug: print(("[!] Error enumerating OS Package: " + str(e))) pass # Enume system Hardware Overview try: process = subprocess.Popen("system_profiler SPHardwareDataType", stdout=subprocess.PIPE, shell=True) ho = process.communicate() ho = ho[0].split('\\n') print("[*] Hardware Overview:") for x in ho[4:]: if x: print((" - " + str(x.strip()))) except Exception as e: if Debug: print(("[!] Error enumerating Hardware Overview: " + str(e))) # Enum Users try: process = subprocess.Popen("dscacheutil -q user | grep -A 3 -B 2 -e uid:\ 5'[0-9][0-9]'", stdout=subprocess.PIPE, shell=True) users = process.communicate() users = users[0].split('\\n') print("[*] Client Users:") for x in users: if x: print(" - " + str(x.strip())) else: print() except Exception as e: if Debug: print("[!] Error enumerating OS Package: " + str(e)) pass # Enum Last Logins try: print("[*] Last Logins:") process = subprocess.Popen("last -10", stdout=subprocess.PIPE, shell=True) last = process.communicate() last = last[0].split('\\n') for x in last: if x.startswith('wtmp'): break if x: print(" - " + str(x.strip())) except Exception as e: if Debug: print("[!] Error Enumerating en0: " + str(e)) pass # Enum Hardware try: process = subprocess.Popen("networksetup -listallhardwareports", stdout=subprocess.PIPE, shell=True) hardware = process.communicate() hardware = hardware[0].split('\\n') print("[*] Installed Interfaces:") for x in hardware: if x: print(" - " + str(x.strip())) else: print() except Exception as e: if Debug: print("[!] Error Enumerating Installed Interfaces: " + str(e)) pass # Enum en0 try: process = subprocess.Popen("ipconfig getpacket en0", stdout=subprocess.PIPE, shell=True) inf = process.communicate() inf = inf[0].split('\\n') print("[*] en0 Interface:") for x in inf: if x: print(" - " + str(x.strip())) else: print() except Exception as e: if Debug: print("[!] 
Error Enumerating en0: " + str(e)) pass # Enum Hosts DNS file try: process = subprocess.Popen("cat /private/etc/hosts", stdout=subprocess.PIPE, shell=True) hosts = process.communicate() hosts = hosts[0].split('\\n') print("[*] DNS Hosts File:") for x in hosts: if x: if x.startswith("#"): pass else: print(" - " + str(x.strip())) else: print() except Exception as e: if Debug: print("[!] Error Enumerating Hosts File: " + str(e)) pass # Enum bash history try: location = home + "/.bash_history" with open(location, 'r') as myfile: HistoryResult = myfile.readlines() HistoryCount = HistoryCount * -1 print("[*] Enumerating User Bash History") print(" - History count size: " + str(len(HistoryResult))) for item in HistoryResult[HistoryCount:]: print(" * " + str(item.strip())) print("[*] SSH commands in History: ") for item in HistoryResult: if "ssh" in item.lower(): print(" * " + str(item.strip())) except Exception as e: if Debug: print("[!] Error enumerating user bash_history: " + str(e)) pass # Enum Wireless Connectivity Info try: process = subprocess.Popen(executable="/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport", args="-I", stdout=subprocess.PIPE, shell=True) wireless = process.communicate() if wireless[0] != '': wireless = wireless[0].split('\\n') print("[*] Wireless Connectivity Info:") for x in wireless: if x: print(" - " + str(x.strip())) else: print() except Exception as e: if Debug: print("[!] Error enumerating user Wireless Connectivity Info: " + str(e)) pass # Enum AV / Protection Software except Exception as e: print(e)"""<line_sep># add any arguments to the end exec <return>script<block_end><block_end>
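# A hedged usage sketch (an assumption, not from the original source): Empire's
# module loader normally calls generate() itself; invoked directly, it would
# look roughly like this. `main_menu` and `module` are unused by this module,
# so None is passed for both.
#
# params = {"Debug": True, "HistoryCount": 10}
# script = Module.generate(main_menu=None, module=None, params=params)
# print(script[:60])  # "Debug = True\nHistoryCount = 10\n try: ..."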
<import_from_stmt>typing Dict<import_from_stmt>typing List<import_from_stmt>botocore.waiter Waiter<class_stmt>BundleTaskComplete(Waiter)<block_start><def_stmt>wait self BundleIds:List=<none> Filters:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_bundle_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeBundleTasks>`_ **Request Syntax** :: waiter.wait( BundleIds=[ 'string', ], Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type BundleIds: list :param BundleIds: The bundle task IDs. Default: Describes all your bundle tasks. - *(string) --* :type Filters: list :param Filters: The filters. * ``bundle-id`` - The ID of the bundle task. * ``error-code`` - If the task failed, the error code returned. * ``error-message`` - If the task failed, the error message returned. * ``instance-id`` - The ID of the instance. * ``progress`` - The level of task completion, as a percentage (for example, 20%). * ``s3-bucket`` - The Amazon S3 bucket to store the AMI. * ``s3-prefix`` - The beginning of the AMI name. * ``start-time`` - The time the task started (for example, 2013-09-15T17:15:20.000Z). * ``state`` - The state of the task (``pending`` | ``waiting-for-shutdown`` | ``bundling`` | ``storing`` | ``cancelling`` | ``complete`` | ``failed`` ). * ``update-time`` - The time of the most recent update for the task. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>ConversionTaskCancelled(Waiter)<block_start><def_stmt>wait self ConversionTaskIds:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_conversion_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. 
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeConversionTasks>`_ **Request Syntax** :: waiter.wait( ConversionTaskIds=[ 'string', ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type ConversionTaskIds: list :param ConversionTaskIds: The conversion task IDs. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>ConversionTaskCompleted(Waiter)<block_start><def_stmt>wait self ConversionTaskIds:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_conversion_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeConversionTasks>`_ **Request Syntax** :: waiter.wait( ConversionTaskIds=[ 'string', ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type ConversionTaskIds: list :param ConversionTaskIds: The conversion task IDs. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>ConversionTaskDeleted(Waiter)<block_start><def_stmt>wait self ConversionTaskIds:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_conversion_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeConversionTasks>`_ **Request Syntax** :: waiter.wait( ConversionTaskIds=[ 'string', ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type ConversionTaskIds: list :param ConversionTaskIds: The conversion task IDs. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. 
Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>CustomerGatewayAvailable(Waiter)<block_start><def_stmt>wait self CustomerGatewayIds:List=<none> Filters:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_customer_gateways` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCustomerGateways>`_ **Request Syntax** :: waiter.wait( CustomerGatewayIds=[ 'string', ], Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type CustomerGatewayIds: list :param CustomerGatewayIds: One or more customer gateway IDs. Default: Describes all your customer gateways. - *(string) --* :type Filters: list :param Filters: One or more filters. * ``bgp-asn`` - The customer gateway\'s Border Gateway Protocol (BGP) Autonomous System Number (ASN). * ``customer-gateway-id`` - The ID of the customer gateway. * ``ip-address`` - The IP address of the customer gateway\'s Internet-routable external interface. * ``state`` - The state of the customer gateway (``pending`` | ``available`` | ``deleting`` | ``deleted`` ). * ``type`` - The type of customer gateway. Currently, the only supported type is ``ipsec.1`` . * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. 
Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>ExportTaskCancelled(Waiter)<block_start><def_stmt>wait self ExportTaskIds:List=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_export_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportTasks>`_ **Request Syntax** :: waiter.wait( ExportTaskIds=[ 'string', ], WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type ExportTaskIds: list :param ExportTaskIds: The export task IDs. - *(string) --* :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>ExportTaskCompleted(Waiter)<block_start><def_stmt>wait self ExportTaskIds:List=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_export_tasks` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportTasks>`_ **Request Syntax** :: waiter.wait( ExportTaskIds=[ 'string', ], WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type ExportTaskIds: list :param ExportTaskIds: The export task IDs. - *(string) --* :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>ImageAvailable(Waiter)<block_start><def_stmt>wait self ExecutableUsers:List=<none> Filters:List=<none> ImageIds:List=<none> Owners:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_images` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImages>`_ **Request Syntax** :: waiter.wait( ExecutableUsers=[ 'string', ], Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], ImageIds=[ 'string', ], Owners=[ 'string', ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type ExecutableUsers: list :param ExecutableUsers: Scopes the images by users with explicit launch permissions. Specify an AWS account ID, ``self`` (the sender of the request), or ``all`` (public AMIs). - *(string) --* :type Filters: list :param Filters: The filters. * ``architecture`` - The image architecture (``i386`` | ``x86_64`` ). * ``block-device-mapping.delete-on-termination`` - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination. * ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ). * ``block-device-mapping.snapshot-id`` - The ID of the snapshot used for the EBS volume. * ``block-device-mapping.volume-size`` - The volume size of the EBS volume, in GiB. 
* ``block-device-mapping.volume-type`` - The volume type of the EBS volume (``gp2`` | ``io1`` | ``st1`` | ``sc1`` | ``standard`` ). * ``block-device-mapping.encrypted`` - A Boolean that indicates whether the EBS volume is encrypted. * ``description`` - The description of the image (provided during image creation). * ``ena-support`` - A Boolean that indicates whether enhanced networking with ENA is enabled. * ``hypervisor`` - The hypervisor type (``ovm`` | ``xen`` ). * ``image-id`` - The ID of the image. * ``image-type`` - The image type (``machine`` | ``kernel`` | ``ramdisk`` ). * ``is-public`` - A Boolean that indicates whether the image is public. * ``kernel-id`` - The kernel ID. * ``manifest-location`` - The location of the image manifest. * ``name`` - The name of the AMI (provided during image creation). * ``owner-alias`` - String value from an Amazon-maintained list (``amazon`` | ``aws-marketplace`` | ``microsoft`` ) of snapshot owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console. * ``owner-id`` - The AWS account ID of the image owner. * ``platform`` - The platform. To only list Windows-based AMIs, use ``windows`` . * ``product-code`` - The product code. * ``product-code.type`` - The type of the product code (``devpay`` | ``marketplace`` ). * ``ramdisk-id`` - The RAM disk ID. * ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ). * ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ). * ``state`` - The state of the image (``available`` | ``pending`` | ``failed`` ). * ``state-reason-code`` - The reason code for the state change. * ``state-reason-message`` - The message for the state change. * ``sriov-net-support`` - A value of ``simple`` indicates that enhanced networking with the Intel 82599 VF interface is enabled. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``virtualization-type`` - The virtualization type (``paravirtual`` | ``hvm`` ). - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type ImageIds: list :param ImageIds: The image IDs. Default: Describes all images available to you. - *(string) --* :type Owners: list :param Owners: Filters the images by the owner. Specify an AWS account ID, ``self`` (owner is the sender of the request), or an AWS owner alias (valid values are ``amazon`` | ``aws-marketplace`` | ``microsoft`` ). 
Omitting this option returns all images for which you have launch permissions, regardless of ownership. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>ImageExists(Waiter)<block_start><def_stmt>wait self ExecutableUsers:List=<none> Filters:List=<none> ImageIds:List=<none> Owners:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_images` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImages>`_ **Request Syntax** :: waiter.wait( ExecutableUsers=[ 'string', ], Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], ImageIds=[ 'string', ], Owners=[ 'string', ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type ExecutableUsers: list :param ExecutableUsers: Scopes the images by users with explicit launch permissions. Specify an AWS account ID, ``self`` (the sender of the request), or ``all`` (public AMIs). - *(string) --* :type Filters: list :param Filters: The filters. * ``architecture`` - The image architecture (``i386`` | ``x86_64`` ). * ``block-device-mapping.delete-on-termination`` - A Boolean value that indicates whether the Amazon EBS volume is deleted on instance termination. * ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ). * ``block-device-mapping.snapshot-id`` - The ID of the snapshot used for the EBS volume. * ``block-device-mapping.volume-size`` - The volume size of the EBS volume, in GiB. * ``block-device-mapping.volume-type`` - The volume type of the EBS volume (``gp2`` | ``io1`` | ``st1`` | ``sc1`` | ``standard`` ). * ``block-device-mapping.encrypted`` - A Boolean that indicates whether the EBS volume is encrypted. * ``description`` - The description of the image (provided during image creation). * ``ena-support`` - A Boolean that indicates whether enhanced networking with ENA is enabled. * ``hypervisor`` - The hypervisor type (``ovm`` | ``xen`` ). * ``image-id`` - The ID of the image. * ``image-type`` - The image type (``machine`` | ``kernel`` | ``ramdisk`` ). * ``is-public`` - A Boolean that indicates whether the image is public. * ``kernel-id`` - The kernel ID. * ``manifest-location`` - The location of the image manifest. * ``name`` - The name of the AMI (provided during image creation). * ``owner-alias`` - String value from an Amazon-maintained list (``amazon`` | ``aws-marketplace`` | ``microsoft`` ) of snapshot owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console. * ``owner-id`` - The AWS account ID of the image owner. * ``platform`` - The platform. To only list Windows-based AMIs, use ``windows`` . * ``product-code`` - The product code. 
* ``product-code.type`` - The type of the product code (``devpay`` | ``marketplace`` ). * ``ramdisk-id`` - The RAM disk ID. * ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ). * ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ). * ``state`` - The state of the image (``available`` | ``pending`` | ``failed`` ). * ``state-reason-code`` - The reason code for the state change. * ``state-reason-message`` - The message for the state change. * ``sriov-net-support`` - A value of ``simple`` indicates that enhanced networking with the Intel 82599 VF interface is enabled. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``virtualization-type`` - The virtualization type (``paravirtual`` | ``hvm`` ). - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type ImageIds: list :param ImageIds: The image IDs. Default: Describes all images available to you. - *(string) --* :type Owners: list :param Owners: Filters the images by the owner. Specify an AWS account ID, ``self`` (owner is the sender of the request), or an AWS owner alias (valid values are ``amazon`` | ``aws-marketplace`` | ``microsoft`` ). Omitting this option returns all images for which you have launch permissions, regardless of ownership. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>InstanceExists(Waiter)<block_start><def_stmt>wait self Filters:List=<none> InstanceIds:List=<none> DryRun:bool=<none> MaxResults:int=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_instances` every 5 seconds until a successful state is reached. An error is returned after 40 failed checks. 
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], InstanceIds=[ 'string', ], DryRun=True|False, MaxResults=123, NextToken='string', WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: The filters. * ``affinity`` - The affinity setting for an instance running on a Dedicated Host (``default`` | ``host`` ). * ``architecture`` - The instance architecture (``i386`` | ``x86_64`` ). * ``availability-zone`` - The Availability Zone of the instance. * ``block-device-mapping.attach-time`` - The attach time for an EBS volume mapped to the instance, for example, ``2010-09-15T17:15:20.000Z`` . * ``block-device-mapping.delete-on-termination`` - A Boolean that indicates whether the EBS volume is deleted on instance termination. * ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ). * ``block-device-mapping.status`` - The status for the EBS volume (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ). * ``block-device-mapping.volume-id`` - The volume ID of the EBS volume. * ``client-token`` - The idempotency token you provided when you launched the instance. * ``dns-name`` - The public DNS name of the instance. * ``group-id`` - The ID of the security group for the instance. EC2-Classic only. * ``group-name`` - The name of the security group for the instance. EC2-Classic only. * ``hibernation-options.configured`` - A Boolean that indicates whether the instance is enabled for hibernation. A value of ``true`` means that the instance is enabled for hibernation. * ``host-id`` - The ID of the Dedicated Host on which the instance is running, if applicable. * ``hypervisor`` - The hypervisor type of the instance (``ovm`` | ``xen`` ). * ``iam-instance-profile.arn`` - The instance profile associated with the instance. Specified as an ARN. * ``image-id`` - The ID of the image used to launch the instance. * ``instance-id`` - The ID of the instance. * ``instance-lifecycle`` - Indicates whether this is a Spot Instance or a Scheduled Instance (``spot`` | ``scheduled`` ). * ``instance-state-code`` - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). * ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ). * ``instance-type`` - The type of instance (for example, ``t2.micro`` ). * ``instance.group-id`` - The ID of the security group for the instance. * ``instance.group-name`` - The name of the security group for the instance. * ``ip-address`` - The public IPv4 address of the instance. * ``kernel-id`` - The kernel ID. * ``key-name`` - The name of the key pair used when the instance was launched. * ``launch-index`` - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). * ``launch-time`` - The time when the instance was launched. * ``monitoring-state`` - Indicates whether detailed monitoring is enabled (``disabled`` | ``enabled`` ). 
* ``network-interface.addresses.private-ip-address`` - The private IPv4 address associated with the network interface. * ``network-interface.addresses.primary`` - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. * ``network-interface.addresses.association.public-ip`` - The ID of the association of an Elastic IP address (IPv4) with a network interface. * ``network-interface.addresses.association.ip-owner-id`` - The owner ID of the private IPv4 address associated with the network interface. * ``network-interface.association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface. * ``network-interface.association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface. * ``network-interface.association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. * ``network-interface.association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address. * ``network-interface.attachment.attachment-id`` - The ID of the interface attachment. * ``network-interface.attachment.instance-id`` - The ID of the instance to which the network interface is attached. * ``network-interface.attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached. * ``network-interface.attachment.device-index`` - The device index to which the network interface is attached. * ``network-interface.attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ). * ``network-interface.attachment.attach-time`` - The time that the network interface was attached to an instance. * ``network-interface.attachment.delete-on-termination`` - Specifies whether the attachment is deleted when an instance is terminated. * ``network-interface.availability-zone`` - The Availability Zone for the network interface. * ``network-interface.description`` - The description of the network interface. * ``network-interface.group-id`` - The ID of a security group associated with the network interface. * ``network-interface.group-name`` - The name of a security group associated with the network interface. * ``network-interface.ipv6-addresses.ipv6-address`` - The IPv6 address associated with the network interface. * ``network-interface.mac-address`` - The MAC address of the network interface. * ``network-interface.network-interface-id`` - The ID of the network interface. * ``network-interface.owner-id`` - The ID of the owner of the network interface. * ``network-interface.private-dns-name`` - The private DNS name of the network interface. * ``network-interface.requester-id`` - The requester ID for the network interface. * ``network-interface.requester-managed`` - Indicates whether the network interface is being managed by AWS. * ``network-interface.status`` - The status of the network interface (``available`` ) | ``in-use`` ). * ``network-interface.source-dest-check`` - Whether the network interface performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC. * ``network-interface.subnet-id`` - The ID of the subnet for the network interface. * ``network-interface.vpc-id`` - The ID of the VPC for the network interface. 
* ``owner-id`` - The AWS account ID of the instance owner. * ``placement-group-name`` - The name of the placement group for the instance. * ``placement-partition-number`` - The partition in which the instance is located. * ``platform`` - The platform. To list only Windows instances, use ``windows`` . * ``private-dns-name`` - The private IPv4 DNS name of the instance. * ``private-ip-address`` - The private IPv4 address of the instance. * ``product-code`` - The product code associated with the AMI used to launch the instance. * ``product-code.type`` - The type of product code (``devpay`` | ``marketplace`` ). * ``ramdisk-id`` - The RAM disk ID. * ``reason`` - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter. * ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on). * ``reservation-id`` - The ID of the instance\'s reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. * ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ). * ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ). * ``source-dest-check`` - Indicates whether the instance performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the instance to perform network address translation (NAT) in your VPC. * ``spot-instance-request-id`` - The ID of the Spot Instance request. * ``state-reason-code`` - The reason code for the state change. * ``state-reason-message`` - A message that describes the state change. * ``subnet-id`` - The ID of the subnet for the instance. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. * ``tenancy`` - The tenancy of an instance (``dedicated`` | ``default`` | ``host`` ). * ``virtualization-type`` - The virtualization type of the instance (``paravirtual`` | ``hvm`` ). * ``vpc-id`` - The ID of the VPC that the instance is running in. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. 
Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type InstanceIds: list :param InstanceIds: The instance IDs. Default: Describes all your instances. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type MaxResults: integer :param MaxResults: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call. :type NextToken: string :param NextToken: The token to request the next page of results. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 5 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>InstanceRunning(Waiter)<block_start><def_stmt>wait self Filters:List=<none> InstanceIds:List=<none> DryRun:bool=<none> MaxResults:int=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_instances` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], InstanceIds=[ 'string', ], DryRun=True|False, MaxResults=123, NextToken='string', WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: The filters. * ``affinity`` - The affinity setting for an instance running on a Dedicated Host (``default`` | ``host`` ). * ``architecture`` - The instance architecture (``i386`` | ``x86_64`` ). * ``availability-zone`` - The Availability Zone of the instance. * ``block-device-mapping.attach-time`` - The attach time for an EBS volume mapped to the instance, for example, ``2010-09-15T17:15:20.000Z`` . * ``block-device-mapping.delete-on-termination`` - A Boolean that indicates whether the EBS volume is deleted on instance termination. * ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ). * ``block-device-mapping.status`` - The status for the EBS volume (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ). * ``block-device-mapping.volume-id`` - The volume ID of the EBS volume. * ``client-token`` - The idempotency token you provided when you launched the instance. * ``dns-name`` - The public DNS name of the instance. * ``group-id`` - The ID of the security group for the instance. EC2-Classic only. * ``group-name`` - The name of the security group for the instance. EC2-Classic only. * ``hibernation-options.configured`` - A Boolean that indicates whether the instance is enabled for hibernation. A value of ``true`` means that the instance is enabled for hibernation. * ``host-id`` - The ID of the Dedicated Host on which the instance is running, if applicable. 
* ``hypervisor`` - The hypervisor type of the instance (``ovm`` | ``xen`` ). * ``iam-instance-profile.arn`` - The instance profile associated with the instance. Specified as an ARN. * ``image-id`` - The ID of the image used to launch the instance. * ``instance-id`` - The ID of the instance. * ``instance-lifecycle`` - Indicates whether this is a Spot Instance or a Scheduled Instance (``spot`` | ``scheduled`` ). * ``instance-state-code`` - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). * ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ). * ``instance-type`` - The type of instance (for example, ``t2.micro`` ). * ``instance.group-id`` - The ID of the security group for the instance. * ``instance.group-name`` - The name of the security group for the instance. * ``ip-address`` - The public IPv4 address of the instance. * ``kernel-id`` - The kernel ID. * ``key-name`` - The name of the key pair used when the instance was launched. * ``launch-index`` - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). * ``launch-time`` - The time when the instance was launched. * ``monitoring-state`` - Indicates whether detailed monitoring is enabled (``disabled`` | ``enabled`` ). * ``network-interface.addresses.private-ip-address`` - The private IPv4 address associated with the network interface. * ``network-interface.addresses.primary`` - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. * ``network-interface.addresses.association.public-ip`` - The ID of the association of an Elastic IP address (IPv4) with a network interface. * ``network-interface.addresses.association.ip-owner-id`` - The owner ID of the private IPv4 address associated with the network interface. * ``network-interface.association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface. * ``network-interface.association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface. * ``network-interface.association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. * ``network-interface.association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address. * ``network-interface.attachment.attachment-id`` - The ID of the interface attachment. * ``network-interface.attachment.instance-id`` - The ID of the instance to which the network interface is attached. * ``network-interface.attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached. * ``network-interface.attachment.device-index`` - The device index to which the network interface is attached. * ``network-interface.attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ). * ``network-interface.attachment.attach-time`` - The time that the network interface was attached to an instance. * ``network-interface.attachment.delete-on-termination`` - Specifies whether the attachment is deleted when an instance is terminated. 
* ``network-interface.availability-zone`` - The Availability Zone for the network interface. * ``network-interface.description`` - The description of the network interface. * ``network-interface.group-id`` - The ID of a security group associated with the network interface. * ``network-interface.group-name`` - The name of a security group associated with the network interface. * ``network-interface.ipv6-addresses.ipv6-address`` - The IPv6 address associated with the network interface. * ``network-interface.mac-address`` - The MAC address of the network interface. * ``network-interface.network-interface-id`` - The ID of the network interface. * ``network-interface.owner-id`` - The ID of the owner of the network interface. * ``network-interface.private-dns-name`` - The private DNS name of the network interface. * ``network-interface.requester-id`` - The requester ID for the network interface. * ``network-interface.requester-managed`` - Indicates whether the network interface is being managed by AWS. * ``network-interface.status`` - The status of the network interface (``available`` | ``in-use`` ). * ``network-interface.source-dest-check`` - Whether the network interface performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC. * ``network-interface.subnet-id`` - The ID of the subnet for the network interface. * ``network-interface.vpc-id`` - The ID of the VPC for the network interface. * ``owner-id`` - The AWS account ID of the instance owner. * ``placement-group-name`` - The name of the placement group for the instance. * ``placement-partition-number`` - The partition in which the instance is located. * ``platform`` - The platform. To list only Windows instances, use ``windows`` . * ``private-dns-name`` - The private IPv4 DNS name of the instance. * ``private-ip-address`` - The private IPv4 address of the instance. * ``product-code`` - The product code associated with the AMI used to launch the instance. * ``product-code.type`` - The type of product code (``devpay`` | ``marketplace`` ). * ``ramdisk-id`` - The RAM disk ID. * ``reason`` - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter. * ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on). * ``reservation-id`` - The ID of the instance\'s reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. * ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ). * ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ). * ``source-dest-check`` - Indicates whether the instance performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. 
The value must be ``false`` for the instance to perform network address translation (NAT) in your VPC. * ``spot-instance-request-id`` - The ID of the Spot Instance request. * ``state-reason-code`` - The reason code for the state change. * ``state-reason-message`` - A message that describes the state change. * ``subnet-id`` - The ID of the subnet for the instance. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. * ``tenancy`` - The tenancy of an instance (``dedicated`` | ``default`` | ``host`` ). * ``virtualization-type`` - The virtualization type of the instance (``paravirtual`` | ``hvm`` ). * ``vpc-id`` - The ID of the VPC that the instance is running in. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type InstanceIds: list :param InstanceIds: The instance IDs. Default: Describes all your instances. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type MaxResults: integer :param MaxResults: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call. :type NextToken: string :param NextToken: The token to request the next page of results. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>InstanceStatusOk(Waiter)<block_start><def_stmt>wait self Filters:List=<none> InstanceIds:List=<none> MaxResults:int=<none> NextToken:str=<none> DryRun:bool=<none> IncludeAllInstances:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_instance_status` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. 
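**Example** A minimal usage sketch (an illustration, not part of the generated stub: it assumes a configured boto3 session, and the instance ID shown is hypothetical): ::

    import boto3

    ec2 = boto3.client('ec2')
    # Fetch this waiter by its boto3 name and block until the instance
    # passes both system and instance status checks.
    waiter = ec2.get_waiter('instance_status_ok')
    waiter.wait(
        InstanceIds=['i-0123456789abcdef0'],  # hypothetical instance ID
        WaiterConfig={'Delay': 15, 'MaxAttempts': 40},
    )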
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceStatus>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], InstanceIds=[ 'string', ], MaxResults=123, NextToken='string', DryRun=True|False, IncludeAllInstances=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: The filters. * ``availability-zone`` - The Availability Zone of the instance. * ``event.code`` - The code for the scheduled event (``instance-reboot`` | ``system-reboot`` | ``system-maintenance`` | ``instance-retirement`` | ``instance-stop`` ). * ``event.description`` - A description of the event. * ``event.instance-event-id`` - The ID of the event whose date and time you are modifying. * ``event.not-after`` - The latest end time for the scheduled event (for example, ``2014-09-15T17:15:20.000Z`` ). * ``event.not-before`` - The earliest start time for the scheduled event (for example, ``2014-09-15T17:15:20.000Z`` ). * ``event.not-before-deadline`` - The deadline for starting the event (for example, ``2014-09-15T17:15:20.000Z`` ). * ``instance-state-code`` - The code for the instance state, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). * ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ). * ``instance-status.reachability`` - Filters on instance status where the name is ``reachability`` (``passed`` | ``failed`` | ``initializing`` | ``insufficient-data`` ). * ``instance-status.status`` - The status of the instance (``ok`` | ``impaired`` | ``initializing`` | ``insufficient-data`` | ``not-applicable`` ). * ``system-status.reachability`` - Filters on system status where the name is ``reachability`` (``passed`` | ``failed`` | ``initializing`` | ``insufficient-data`` ). * ``system-status.status`` - The system status of the instance (``ok`` | ``impaired`` | ``initializing`` | ``insufficient-data`` | ``not-applicable`` ). - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type InstanceIds: list :param InstanceIds: The instance IDs. Default: Describes all your instances. Constraints: Maximum 100 explicitly specified instance IDs. - *(string) --* :type MaxResults: integer :param MaxResults: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call. :type NextToken: string :param NextToken: The token to retrieve the next page of results. 
:type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type IncludeAllInstances: boolean :param IncludeAllInstances: When ``true`` , includes the health status for all instances. When ``false`` , includes the health status for running instances only. Default: ``false`` :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>InstanceStopped(Waiter)<block_start><def_stmt>wait self Filters:List=<none> InstanceIds:List=<none> DryRun:bool=<none> MaxResults:int=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_instances` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], InstanceIds=[ 'string', ], DryRun=True|False, MaxResults=123, NextToken='string', WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: The filters. * ``affinity`` - The affinity setting for an instance running on a Dedicated Host (``default`` | ``host`` ). * ``architecture`` - The instance architecture (``i386`` | ``x86_64`` ). * ``availability-zone`` - The Availability Zone of the instance. * ``block-device-mapping.attach-time`` - The attach time for an EBS volume mapped to the instance, for example, ``2010-09-15T17:15:20.000Z`` . * ``block-device-mapping.delete-on-termination`` - A Boolean that indicates whether the EBS volume is deleted on instance termination. * ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ). * ``block-device-mapping.status`` - The status for the EBS volume (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ). * ``block-device-mapping.volume-id`` - The volume ID of the EBS volume. * ``client-token`` - The idempotency token you provided when you launched the instance. * ``dns-name`` - The public DNS name of the instance. * ``group-id`` - The ID of the security group for the instance. EC2-Classic only. * ``group-name`` - The name of the security group for the instance. EC2-Classic only. * ``hibernation-options.configured`` - A Boolean that indicates whether the instance is enabled for hibernation. A value of ``true`` means that the instance is enabled for hibernation. * ``host-id`` - The ID of the Dedicated Host on which the instance is running, if applicable. * ``hypervisor`` - The hypervisor type of the instance (``ovm`` | ``xen`` ). * ``iam-instance-profile.arn`` - The instance profile associated with the instance. Specified as an ARN. * ``image-id`` - The ID of the image used to launch the instance. * ``instance-id`` - The ID of the instance. * ``instance-lifecycle`` - Indicates whether this is a Spot Instance or a Scheduled Instance (``spot`` | ``scheduled`` ). 
* ``instance-state-code`` - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). * ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ). * ``instance-type`` - The type of instance (for example, ``t2.micro`` ). * ``instance.group-id`` - The ID of the security group for the instance. * ``instance.group-name`` - The name of the security group for the instance. * ``ip-address`` - The public IPv4 address of the instance. * ``kernel-id`` - The kernel ID. * ``key-name`` - The name of the key pair used when the instance was launched. * ``launch-index`` - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). * ``launch-time`` - The time when the instance was launched. * ``monitoring-state`` - Indicates whether detailed monitoring is enabled (``disabled`` | ``enabled`` ). * ``network-interface.addresses.private-ip-address`` - The private IPv4 address associated with the network interface. * ``network-interface.addresses.primary`` - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. * ``network-interface.addresses.association.public-ip`` - The ID of the association of an Elastic IP address (IPv4) with a network interface. * ``network-interface.addresses.association.ip-owner-id`` - The owner ID of the private IPv4 address associated with the network interface. * ``network-interface.association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface. * ``network-interface.association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface. * ``network-interface.association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. * ``network-interface.association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address. * ``network-interface.attachment.attachment-id`` - The ID of the interface attachment. * ``network-interface.attachment.instance-id`` - The ID of the instance to which the network interface is attached. * ``network-interface.attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached. * ``network-interface.attachment.device-index`` - The device index to which the network interface is attached. * ``network-interface.attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ). * ``network-interface.attachment.attach-time`` - The time that the network interface was attached to an instance. * ``network-interface.attachment.delete-on-termination`` - Specifies whether the attachment is deleted when an instance is terminated. * ``network-interface.availability-zone`` - The Availability Zone for the network interface. * ``network-interface.description`` - The description of the network interface. * ``network-interface.group-id`` - The ID of a security group associated with the network interface. * ``network-interface.group-name`` - The name of a security group associated with the network interface. 
* ``network-interface.ipv6-addresses.ipv6-address`` - The IPv6 address associated with the network interface. * ``network-interface.mac-address`` - The MAC address of the network interface. * ``network-interface.network-interface-id`` - The ID of the network interface. * ``network-interface.owner-id`` - The ID of the owner of the network interface. * ``network-interface.private-dns-name`` - The private DNS name of the network interface. * ``network-interface.requester-id`` - The requester ID for the network interface. * ``network-interface.requester-managed`` - Indicates whether the network interface is being managed by AWS. * ``network-interface.status`` - The status of the network interface (``available`` | ``in-use`` ). * ``network-interface.source-dest-check`` - Whether the network interface performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC. * ``network-interface.subnet-id`` - The ID of the subnet for the network interface. * ``network-interface.vpc-id`` - The ID of the VPC for the network interface. * ``owner-id`` - The AWS account ID of the instance owner. * ``placement-group-name`` - The name of the placement group for the instance. * ``placement-partition-number`` - The partition in which the instance is located. * ``platform`` - The platform. To list only Windows instances, use ``windows`` . * ``private-dns-name`` - The private IPv4 DNS name of the instance. * ``private-ip-address`` - The private IPv4 address of the instance. * ``product-code`` - The product code associated with the AMI used to launch the instance. * ``product-code.type`` - The type of product code (``devpay`` | ``marketplace`` ). * ``ramdisk-id`` - The RAM disk ID. * ``reason`` - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter. * ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on). * ``reservation-id`` - The ID of the instance\'s reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. * ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ). * ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ). * ``source-dest-check`` - Indicates whether the instance performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the instance to perform network address translation (NAT) in your VPC. * ``spot-instance-request-id`` - The ID of the Spot Instance request. * ``state-reason-code`` - The reason code for the state change. * ``state-reason-message`` - A message that describes the state change. * ``subnet-id`` - The ID of the subnet for the instance. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. 
Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. * ``tenancy`` - The tenancy of an instance (``dedicated`` | ``default`` | ``host`` ). * ``virtualization-type`` - The virtualization type of the instance (``paravirtual`` | ``hvm`` ). * ``vpc-id`` - The ID of the VPC that the instance is running in. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type InstanceIds: list :param InstanceIds: The instance IDs. Default: Describes all your instances. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type MaxResults: integer :param MaxResults: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call. :type NextToken: string :param NextToken: The token to request the next page of results. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>InstanceTerminated(Waiter)<block_start><def_stmt>wait self Filters:List=<none> InstanceIds:List=<none> DryRun:bool=<none> MaxResults:int=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_instances` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], InstanceIds=[ 'string', ], DryRun=True|False, MaxResults=123, NextToken='string', WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: The filters. * ``affinity`` - The affinity setting for an instance running on a Dedicated Host (``default`` | ``host`` ). * ``architecture`` - The instance architecture (``i386`` | ``x86_64`` ). * ``availability-zone`` - The Availability Zone of the instance. 
* ``block-device-mapping.attach-time`` - The attach time for an EBS volume mapped to the instance, for example, ``2010-09-15T17:15:20.000Z`` . * ``block-device-mapping.delete-on-termination`` - A Boolean that indicates whether the EBS volume is deleted on instance termination. * ``block-device-mapping.device-name`` - The device name specified in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ). * ``block-device-mapping.status`` - The status for the EBS volume (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ). * ``block-device-mapping.volume-id`` - The volume ID of the EBS volume. * ``client-token`` - The idempotency token you provided when you launched the instance. * ``dns-name`` - The public DNS name of the instance. * ``group-id`` - The ID of the security group for the instance. EC2-Classic only. * ``group-name`` - The name of the security group for the instance. EC2-Classic only. * ``hibernation-options.configured`` - A Boolean that indicates whether the instance is enabled for hibernation. A value of ``true`` means that the instance is enabled for hibernation. * ``host-id`` - The ID of the Dedicated Host on which the instance is running, if applicable. * ``hypervisor`` - The hypervisor type of the instance (``ovm`` | ``xen`` ). * ``iam-instance-profile.arn`` - The instance profile associated with the instance. Specified as an ARN. * ``image-id`` - The ID of the image used to launch the instance. * ``instance-id`` - The ID of the instance. * ``instance-lifecycle`` - Indicates whether this is a Spot Instance or a Scheduled Instance (``spot`` | ``scheduled`` ). * ``instance-state-code`` - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). * ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ). * ``instance-type`` - The type of instance (for example, ``t2.micro`` ). * ``instance.group-id`` - The ID of the security group for the instance. * ``instance.group-name`` - The name of the security group for the instance. * ``ip-address`` - The public IPv4 address of the instance. * ``kernel-id`` - The kernel ID. * ``key-name`` - The name of the key pair used when the instance was launched. * ``launch-index`` - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). * ``launch-time`` - The time when the instance was launched. * ``monitoring-state`` - Indicates whether detailed monitoring is enabled (``disabled`` | ``enabled`` ). * ``network-interface.addresses.private-ip-address`` - The private IPv4 address associated with the network interface. * ``network-interface.addresses.primary`` - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. * ``network-interface.addresses.association.public-ip`` - The ID of the association of an Elastic IP address (IPv4) with a network interface. * ``network-interface.addresses.association.ip-owner-id`` - The owner ID of the private IPv4 address associated with the network interface. * ``network-interface.association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface. 
* ``network-interface.association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface. * ``network-interface.association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. * ``network-interface.association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address. * ``network-interface.attachment.attachment-id`` - The ID of the interface attachment. * ``network-interface.attachment.instance-id`` - The ID of the instance to which the network interface is attached. * ``network-interface.attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached. * ``network-interface.attachment.device-index`` - The device index to which the network interface is attached. * ``network-interface.attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ). * ``network-interface.attachment.attach-time`` - The time that the network interface was attached to an instance. * ``network-interface.attachment.delete-on-termination`` - Specifies whether the attachment is deleted when an instance is terminated. * ``network-interface.availability-zone`` - The Availability Zone for the network interface. * ``network-interface.description`` - The description of the network interface. * ``network-interface.group-id`` - The ID of a security group associated with the network interface. * ``network-interface.group-name`` - The name of a security group associated with the network interface. * ``network-interface.ipv6-addresses.ipv6-address`` - The IPv6 address associated with the network interface. * ``network-interface.mac-address`` - The MAC address of the network interface. * ``network-interface.network-interface-id`` - The ID of the network interface. * ``network-interface.owner-id`` - The ID of the owner of the network interface. * ``network-interface.private-dns-name`` - The private DNS name of the network interface. * ``network-interface.requester-id`` - The requester ID for the network interface. * ``network-interface.requester-managed`` - Indicates whether the network interface is being managed by AWS. * ``network-interface.status`` - The status of the network interface (``available`` | ``in-use`` ). * ``network-interface.source-dest-check`` - Whether the network interface performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC. * ``network-interface.subnet-id`` - The ID of the subnet for the network interface. * ``network-interface.vpc-id`` - The ID of the VPC for the network interface. * ``owner-id`` - The AWS account ID of the instance owner. * ``placement-group-name`` - The name of the placement group for the instance. * ``placement-partition-number`` - The partition in which the instance is located. * ``platform`` - The platform. To list only Windows instances, use ``windows`` . * ``private-dns-name`` - The private IPv4 DNS name of the instance. * ``private-ip-address`` - The private IPv4 address of the instance. * ``product-code`` - The product code associated with the AMI used to launch the instance. * ``product-code.type`` - The type of product code (``devpay`` | ``marketplace`` ). * ``ramdisk-id`` - The RAM disk ID. 
* ``reason`` - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter. * ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on). * ``reservation-id`` - The ID of the instance\'s reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. * ``root-device-name`` - The device name of the root device volume (for example, ``/dev/sda1`` ). * ``root-device-type`` - The type of the root device volume (``ebs`` | ``instance-store`` ). * ``source-dest-check`` - Indicates whether the instance performs source/destination checking. A value of ``true`` means that checking is enabled, and ``false`` means that checking is disabled. The value must be ``false`` for the instance to perform network address translation (NAT) in your VPC. * ``spot-instance-request-id`` - The ID of the Spot Instance request. * ``state-reason-code`` - The reason code for the state change. * ``state-reason-message`` - A message that describes the state change. * ``subnet-id`` - The ID of the subnet for the instance. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. * ``tenancy`` - The tenancy of an instance (``dedicated`` | ``default`` | ``host`` ). * ``virtualization-type`` - The virtualization type of the instance (``paravirtual`` | ``hvm`` ). * ``vpc-id`` - The ID of the VPC that the instance is running in. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type InstanceIds: list :param InstanceIds: The instance IDs. Default: Describes all your instances. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type MaxResults: integer :param MaxResults: The maximum number of results to return in a single call. 
To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call. :type NextToken: string :param NextToken: The token to request the next page of results. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>KeyPairExists(Waiter)<block_start><def_stmt>wait self Filters:List=<none> KeyNames:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_key_pairs` every 5 seconds until a successful state is reached. An error is returned after 6 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeKeyPairs>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], KeyNames=[ 'string', ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: The filters. * ``fingerprint`` - The fingerprint of the key pair. * ``key-name`` - The name of the key pair. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type KeyNames: list :param KeyNames: The key pair names. Default: Describes all your key pairs. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 5 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 6 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>NatGatewayAvailable(Waiter)<block_start><def_stmt>wait self Filters:List=<none> MaxResults:int=<none> NatGatewayIds:List=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_nat_gateways` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. 
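**Example** A minimal usage sketch (an illustration under assumptions, not part of the generated stub: it presumes a configured boto3 session, and the NAT gateway ID shown is hypothetical): ::

    import boto3

    ec2 = boto3.client('ec2')
    # Fetch this waiter by its boto3 name and block until the NAT gateway
    # reaches the available state (polls every 15 seconds, up to 40 attempts).
    waiter = ec2.get_waiter('nat_gateway_available')
    waiter.wait(
        NatGatewayIds=['nat-0123456789abcdef0'],  # hypothetical NAT gateway ID
        WaiterConfig={'Delay': 15, 'MaxAttempts': 40},
    )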
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNatGateways>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], MaxResults=123, NatGatewayIds=[ 'string', ], NextToken='string', WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: One or more filters. * ``nat-gateway-id`` - The ID of the NAT gateway. * ``state`` - The state of the NAT gateway (``pending`` | ``failed`` | ``available`` | ``deleting`` | ``deleted`` ). * ``subnet-id`` - The ID of the subnet in which the NAT gateway resides. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``vpc-id`` - The ID of the VPC in which the NAT gateway resides. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type MaxResults: integer :param MaxResults: The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned ``nextToken`` value. :type NatGatewayIds: list :param NatGatewayIds: One or more NAT gateway IDs. - *(string) --* :type NextToken: string :param NextToken: The token for the next page of results. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>NetworkInterfaceAvailable(Waiter)<block_start><def_stmt>wait self Filters:List=<none> DryRun:bool=<none> NetworkInterfaceIds:List=<none> NextToken:str=<none> MaxResults:int=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_network_interfaces` every 20 seconds until a successful state is reached. An error is returned after 10 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfaces>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], DryRun=True|False, NetworkInterfaceIds=[ 'string', ], NextToken='string', MaxResults=123, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: One or more filters. * ``addresses.private-ip-address`` - The private IPv4 addresses associated with the network interface. 
* ``addresses.primary`` - Whether the private IPv4 address is the primary IP address associated with the network interface. * ``addresses.association.public-ip`` - The association ID returned when the network interface was associated with the Elastic IP address (IPv4). * ``addresses.association.owner-id`` - The owner ID of the addresses associated with the network interface. * ``association.association-id`` - The association ID returned when the network interface was associated with an IPv4 address. * ``association.allocation-id`` - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. * ``association.ip-owner-id`` - The owner of the Elastic IP address (IPv4) associated with the network interface. * ``association.public-ip`` - The address of the Elastic IP address (IPv4) bound to the network interface. * ``association.public-dns-name`` - The public DNS name for the network interface (IPv4). * ``attachment.attachment-id`` - The ID of the interface attachment. * ``attachment.attach.time`` - The time that the network interface was attached to an instance. * ``attachment.delete-on-termination`` - Indicates whether the attachment is deleted when an instance is terminated. * ``attachment.device-index`` - The device index to which the network interface is attached. * ``attachment.instance-id`` - The ID of the instance to which the network interface is attached. * ``attachment.instance-owner-id`` - The owner ID of the instance to which the network interface is attached. * ``attachment.nat-gateway-id`` - The ID of the NAT gateway to which the network interface is attached. * ``attachment.status`` - The status of the attachment (``attaching`` | ``attached`` | ``detaching`` | ``detached`` ). * ``availability-zone`` - The Availability Zone of the network interface. * ``description`` - The description of the network interface. * ``group-id`` - The ID of a security group associated with the network interface. * ``group-name`` - The name of a security group associated with the network interface. * ``ipv6-addresses.ipv6-address`` - An IPv6 address associated with the network interface. * ``mac-address`` - The MAC address of the network interface. * ``network-interface-id`` - The ID of the network interface. * ``owner-id`` - The AWS account ID of the network interface owner. * ``private-ip-address`` - The private IPv4 address or addresses of the network interface. * ``private-dns-name`` - The private DNS name of the network interface (IPv4). * ``requester-id`` - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on). * ``requester-managed`` - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on). * ``source-dest-check`` - Indicates whether the network interface performs source/destination checking. A value of ``true`` means checking is enabled, and ``false`` means checking is disabled. The value must be ``false`` for the network interface to perform network address translation (NAT) in your VPC. * ``status`` - The status of the network interface. If the network interface is not attached to an instance, the status is ``available`` ; if a network interface is attached to an instance the status is ``in-use`` . * ``subnet-id`` - The ID of the subnet for the network interface. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. 
Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``vpc-id`` - The ID of the VPC for the network interface. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type NetworkInterfaceIds: list :param NetworkInterfaceIds: One or more network interface IDs. Default: Describes all your network interfaces. - *(string) --* :type NextToken: string :param NextToken: The token to retrieve the next page of results. :type MaxResults: integer :param MaxResults: The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 20 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 10 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>PasswordDataAvailable(Waiter)<block_start><def_stmt>wait self InstanceId:str DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.get_password_data` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetPasswordData>`_ **Request Syntax** :: waiter.wait( InstanceId='string', DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type InstanceId: string :param InstanceId: **[REQUIRED]** The ID of the Windows instance. :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. 
Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>SnapshotCompleted(Waiter)<block_start><def_stmt>wait self Filters:List=<none> MaxResults:int=<none> NextToken:str=<none> OwnerIds:List=<none> RestorableByUserIds:List=<none> SnapshotIds:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_snapshots` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshots>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], MaxResults=123, NextToken='string', OwnerIds=[ 'string', ], RestorableByUserIds=[ 'string', ], SnapshotIds=[ 'string', ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: The filters. * ``description`` - A description of the snapshot. * ``encrypted`` - Indicates whether the snapshot is encrypted (``true`` | ``false`` ) * ``owner-alias`` - Value from an Amazon-maintained list (``amazon`` | ``self`` | ``all`` | ``aws-marketplace`` | ``microsoft`` ) of snapshot owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console. * ``owner-id`` - The ID of the AWS account that owns the snapshot. * ``progress`` - The progress of the snapshot, as a percentage (for example, 80%). * ``snapshot-id`` - The snapshot ID. * ``start-time`` - The time stamp when the snapshot was initiated. * ``status`` - The status of the snapshot (``pending`` | ``completed`` | ``error`` ). * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``volume-id`` - The ID of the volume the snapshot is for. * ``volume-size`` - The size of the volume, in GiB. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type MaxResults: integer :param MaxResults: The maximum number of snapshot results returned by ``DescribeSnapshots`` in paginated output. When this parameter is used, ``DescribeSnapshots`` only returns ``MaxResults`` results in a single page along with a ``NextToken`` response element. The remaining results of the initial request can be seen by sending another ``DescribeSnapshots`` request with the returned ``NextToken`` value. This value can be between 5 and 1000; if ``MaxResults`` is given a value larger than 1000, only 1000 results are returned. 
If this parameter is not used, then ``DescribeSnapshots`` returns all results. You cannot specify this parameter and the snapshot IDs parameter in the same request. :type NextToken: string :param NextToken: The ``NextToken`` value returned from a previous paginated ``DescribeSnapshots`` request where ``MaxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``NextToken`` value. This value is ``null`` when there are no more results to return. :type OwnerIds: list :param OwnerIds: Describes the snapshots owned by these owners. - *(string) --* :type RestorableByUserIds: list :param RestorableByUserIds: The IDs of the AWS accounts that can create volumes from the snapshot. - *(string) --* :type SnapshotIds: list :param SnapshotIds: The snapshot IDs. Default: Describes the snapshots for which you have create volume permissions. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>SpotInstanceRequestFulfilled(Waiter)<block_start><def_stmt>wait self Filters:List=<none> DryRun:bool=<none> SpotInstanceRequestIds:List=<none> NextToken:str=<none> MaxResults:int=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_spot_instance_requests` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotInstanceRequests>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], DryRun=True|False, SpotInstanceRequestIds=[ 'string', ], NextToken='string', MaxResults=123, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: One or more filters. * ``availability-zone-group`` - The Availability Zone group. * ``create-time`` - The time stamp when the Spot Instance request was created. * ``fault-code`` - The fault code related to the request. * ``fault-message`` - The fault message related to the request. * ``instance-id`` - The ID of the instance that fulfilled the request. * ``launch-group`` - The Spot Instance launch group. * ``launch.block-device-mapping.delete-on-termination`` - Indicates whether the EBS volume is deleted on instance termination. * ``launch.block-device-mapping.device-name`` - The device name for the volume in the block device mapping (for example, ``/dev/sdh`` or ``xvdh`` ). * ``launch.block-device-mapping.snapshot-id`` - The ID of the snapshot for the EBS volume. * ``launch.block-device-mapping.volume-size`` - The size of the EBS volume, in GiB. * ``launch.block-device-mapping.volume-type`` - The type of EBS volume: ``gp2`` for General Purpose SSD, ``io1`` for Provisioned IOPS SSD, ``st1`` for Throughput Optimized HDD, ``sc1`` for Cold HDD, or ``standard`` for Magnetic. * ``launch.group-id`` - The ID of the security group for the instance. 
* ``launch.group-name`` - The name of the security group for the instance. * ``launch.image-id`` - The ID of the AMI. * ``launch.instance-type`` - The type of instance (for example, ``m3.medium`` ). * ``launch.kernel-id`` - The kernel ID. * ``launch.key-name`` - The name of the key pair the instance launched with. * ``launch.monitoring-enabled`` - Whether detailed monitoring is enabled for the Spot Instance. * ``launch.ramdisk-id`` - The RAM disk ID. * ``launched-availability-zone`` - The Availability Zone in which the request is launched. * ``network-interface.addresses.primary`` - Indicates whether the IP address is the primary private IP address. * ``network-interface.delete-on-termination`` - Indicates whether the network interface is deleted when the instance is terminated. * ``network-interface.description`` - A description of the network interface. * ``network-interface.device-index`` - The index of the device for the network interface attachment on the instance. * ``network-interface.group-id`` - The ID of the security group associated with the network interface. * ``network-interface.network-interface-id`` - The ID of the network interface. * ``network-interface.private-ip-address`` - The primary private IP address of the network interface. * ``network-interface.subnet-id`` - The ID of the subnet for the instance. * ``product-description`` - The product description associated with the instance (``Linux/UNIX`` | ``Windows`` ). * ``spot-instance-request-id`` - The Spot Instance request ID. * ``spot-price`` - The maximum hourly price for any Spot Instance launched to fulfill the request. * ``state`` - The state of the Spot Instance request (``open`` | ``active`` | ``closed`` | ``cancelled`` | ``failed`` ). Spot request status information can help you track your Amazon EC2 Spot Instance requests. For more information, see `Spot Request Status <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html>`__ in the *Amazon EC2 User Guide for Linux Instances* . * ``status-code`` - The short code describing the most recent evaluation of your Spot Instance request. * ``status-message`` - The message explaining the status of the Spot Instance request. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``type`` - The type of Spot Instance request (``one-time`` | ``persistent`` ). * ``valid-from`` - The start date of the request. * ``valid-until`` - The end date of the request. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. 
Filter values are case-sensitive. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type SpotInstanceRequestIds: list :param SpotInstanceRequestIds: One or more Spot Instance request IDs. - *(string) --* :type NextToken: string :param NextToken: The token to request the next set of results. This value is ``null`` when there are no more results to return. :type MaxResults: integer :param MaxResults: The maximum number of results to return in a single call. Specify a value between 5 and 1000. To retrieve the remaining results, make another call with the returned ``NextToken`` value. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>SubnetAvailable(Waiter)<block_start><def_stmt>wait self Filters:List=<none> SubnetIds:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_subnets` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSubnets>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], SubnetIds=[ 'string', ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: One or more filters. * ``availability-zone`` - The Availability Zone for the subnet. You can also use ``availabilityZone`` as the filter name. * ``availability-zone-id`` - The ID of the Availability Zone for the subnet. You can also use ``availabilityZoneId`` as the filter name. * ``available-ip-address-count`` - The number of IPv4 addresses in the subnet that are available. * ``cidr-block`` - The IPv4 CIDR block of the subnet. The CIDR block you specify must exactly match the subnet\'s CIDR block for information to be returned for the subnet. You can also use ``cidr`` or ``cidrBlock`` as the filter names. * ``default-for-az`` - Indicates whether this is the default subnet for the Availability Zone. You can also use ``defaultForAz`` as the filter name. * ``ipv6-cidr-block-association.ipv6-cidr-block`` - An IPv6 CIDR block associated with the subnet. * ``ipv6-cidr-block-association.association-id`` - An association ID for an IPv6 CIDR block associated with the subnet. * ``ipv6-cidr-block-association.state`` - The state of an IPv6 CIDR block associated with the subnet. * ``owner-id`` - The ID of the AWS account that owns the subnet. * ``state`` - The state of the subnet (``pending`` | ``available`` ). * ``subnet-arn`` - The Amazon Resource Name (ARN) of the subnet. * ``subnet-id`` - The ID of the subnet. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. 
* ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``vpc-id`` - The ID of the VPC for the subnet. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type SubnetIds: list :param SubnetIds: One or more subnet IDs. Default: Describes all your subnets. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>SystemStatusOk(Waiter)<block_start><def_stmt>wait self Filters:List=<none> InstanceIds:List=<none> MaxResults:int=<none> NextToken:str=<none> DryRun:bool=<none> IncludeAllInstances:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_instance_status` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceStatus>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], InstanceIds=[ 'string', ], MaxResults=123, NextToken='string', DryRun=True|False, IncludeAllInstances=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: The filters. * ``availability-zone`` - The Availability Zone of the instance. * ``event.code`` - The code for the scheduled event (``instance-reboot`` | ``system-reboot`` | ``system-maintenance`` | ``instance-retirement`` | ``instance-stop`` ). * ``event.description`` - A description of the event. * ``event.instance-event-id`` - The ID of the event whose date and time you are modifying. * ``event.not-after`` - The latest end time for the scheduled event (for example, ``2014-09-15T17:15:20.000Z`` ). * ``event.not-before`` - The earliest start time for the scheduled event (for example, ``2014-09-15T17:15:20.000Z`` ). * ``event.not-before-deadline`` - The deadline for starting the event (for example, ``2014-09-15T17:15:20.000Z`` ). * ``instance-state-code`` - The code for the instance state, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. 
The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). * ``instance-state-name`` - The state of the instance (``pending`` | ``running`` | ``shutting-down`` | ``terminated`` | ``stopping`` | ``stopped`` ). * ``instance-status.reachability`` - Filters on instance status where the name is ``reachability`` (``passed`` | ``failed`` | ``initializing`` | ``insufficient-data`` ). * ``instance-status.status`` - The status of the instance (``ok`` | ``impaired`` | ``initializing`` | ``insufficient-data`` | ``not-applicable`` ). * ``system-status.reachability`` - Filters on system status where the name is ``reachability`` (``passed`` | ``failed`` | ``initializing`` | ``insufficient-data`` ). * ``system-status.status`` - The system status of the instance (``ok`` | ``impaired`` | ``initializing`` | ``insufficient-data`` | ``not-applicable`` ). - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type InstanceIds: list :param InstanceIds: The instance IDs. Default: Describes all your instances. Constraints: Maximum 100 explicitly specified instance IDs. - *(string) --* :type MaxResults: integer :param MaxResults: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned ``NextToken`` value. This value can be between 5 and 1000. You cannot specify this parameter and the instance IDs parameter in the same call. :type NextToken: string :param NextToken: The token to retrieve the next page of results. :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type IncludeAllInstances: boolean :param IncludeAllInstances: When ``true`` , includes the health status for all instances. When ``false`` , includes the health status for running instances only. Default: ``false`` :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>VolumeAvailable(Waiter)<block_start><def_stmt>wait self Filters:List=<none> VolumeIds:List=<none> DryRun:bool=<none> MaxResults:int=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_volumes` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. 
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumes>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], VolumeIds=[ 'string', ], DryRun=True|False, MaxResults=123, NextToken='string', WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: The filters. * ``attachment.attach-time`` - The time stamp when the attachment initiated. * ``attachment.delete-on-termination`` - Whether the volume is deleted on instance termination. * ``attachment.device`` - The device name specified in the block device mapping (for example, ``/dev/sda1`` ). * ``attachment.instance-id`` - The ID of the instance the volume is attached to. * ``attachment.status`` - The attachment state (``attaching`` | ``attached`` | ``detaching`` ). * ``availability-zone`` - The Availability Zone in which the volume was created. * ``create-time`` - The time stamp when the volume was created. * ``encrypted`` - Indicates whether the volume is encrypted (``true`` | ``false`` ) * ``size`` - The size of the volume, in GiB. * ``snapshot-id`` - The snapshot from which the volume was created. * ``status`` - The status of the volume (``creating`` | ``available`` | ``in-use`` | ``deleting`` | ``deleted`` | ``error`` ). * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``volume-id`` - The volume ID. * ``volume-type`` - The Amazon EBS volume type. This can be ``gp2`` for General Purpose SSD, ``io1`` for Provisioned IOPS SSD, ``st1`` for Throughput Optimized HDD, ``sc1`` for Cold HDD, or ``standard`` for Magnetic volumes. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type VolumeIds: list :param VolumeIds: The volume IDs. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type MaxResults: integer :param MaxResults: The maximum number of volume results returned by ``DescribeVolumes`` in paginated output. When this parameter is used, ``DescribeVolumes`` only returns ``MaxResults`` results in a single page along with a ``NextToken`` response element. 
The remaining results of the initial request can be seen by sending another ``DescribeVolumes`` request with the returned ``NextToken`` value. This value can be between 5 and 500; if ``MaxResults`` is given a value larger than 500, only 500 results are returned. If this parameter is not used, then ``DescribeVolumes`` returns all results. You cannot specify this parameter and the volume IDs parameter in the same request. :type NextToken: string :param NextToken: The ``NextToken`` value returned from a previous paginated ``DescribeVolumes`` request where ``MaxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``NextToken`` value. This value is ``null`` when there are no more results to return. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>VolumeDeleted(Waiter)<block_start><def_stmt>wait self Filters:List=<none> VolumeIds:List=<none> DryRun:bool=<none> MaxResults:int=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_volumes` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumes>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], VolumeIds=[ 'string', ], DryRun=True|False, MaxResults=123, NextToken='string', WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: The filters. * ``attachment.attach-time`` - The time stamp when the attachment initiated. * ``attachment.delete-on-termination`` - Whether the volume is deleted on instance termination. * ``attachment.device`` - The device name specified in the block device mapping (for example, ``/dev/sda1`` ). * ``attachment.instance-id`` - The ID of the instance the volume is attached to. * ``attachment.status`` - The attachment state (``attaching`` | ``attached`` | ``detaching`` ). * ``availability-zone`` - The Availability Zone in which the volume was created. * ``create-time`` - The time stamp when the volume was created. * ``encrypted`` - Indicates whether the volume is encrypted (``true`` | ``false`` ) * ``size`` - The size of the volume, in GiB. * ``snapshot-id`` - The snapshot from which the volume was created. * ``status`` - The status of the volume (``creating`` | ``available`` | ``in-use`` | ``deleting`` | ``deleted`` | ``error`` ). * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``volume-id`` - The volume ID. * ``volume-type`` - The Amazon EBS volume type. 
This can be ``gp2`` for General Purpose SSD, ``io1`` for Provisioned IOPS SSD, ``st1`` for Throughput Optimized HDD, ``sc1`` for Cold HDD, or ``standard`` for Magnetic volumes. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type VolumeIds: list :param VolumeIds: The volume IDs. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type MaxResults: integer :param MaxResults: The maximum number of volume results returned by ``DescribeVolumes`` in paginated output. When this parameter is used, ``DescribeVolumes`` only returns ``MaxResults`` results in a single page along with a ``NextToken`` response element. The remaining results of the initial request can be seen by sending another ``DescribeVolumes`` request with the returned ``NextToken`` value. This value can be between 5 and 500; if ``MaxResults`` is given a value larger than 500, only 500 results are returned. If this parameter is not used, then ``DescribeVolumes`` returns all results. You cannot specify this parameter and the volume IDs parameter in the same request. :type NextToken: string :param NextToken: The ``NextToken`` value returned from a previous paginated ``DescribeVolumes`` request where ``MaxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``NextToken`` value. This value is ``null`` when there are no more results to return. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>VolumeInUse(Waiter)<block_start><def_stmt>wait self Filters:List=<none> VolumeIds:List=<none> DryRun:bool=<none> MaxResults:int=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_volumes` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumes>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], VolumeIds=[ 'string', ], DryRun=True|False, MaxResults=123, NextToken='string', WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: The filters. * ``attachment.attach-time`` - The time stamp when the attachment initiated. 
* ``attachment.delete-on-termination`` - Whether the volume is deleted on instance termination. * ``attachment.device`` - The device name specified in the block device mapping (for example, ``/dev/sda1`` ). * ``attachment.instance-id`` - The ID of the instance the volume is attached to. * ``attachment.status`` - The attachment state (``attaching`` | ``attached`` | ``detaching`` ). * ``availability-zone`` - The Availability Zone in which the volume was created. * ``create-time`` - The time stamp when the volume was created. * ``encrypted`` - Indicates whether the volume is encrypted (``true`` | ``false`` ) * ``size`` - The size of the volume, in GiB. * ``snapshot-id`` - The snapshot from which the volume was created. * ``status`` - The status of the volume (``creating`` | ``available`` | ``in-use`` | ``deleting`` | ``deleted`` | ``error`` ). * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``volume-id`` - The volume ID. * ``volume-type`` - The Amazon EBS volume type. This can be ``gp2`` for General Purpose SSD, ``io1`` for Provisioned IOPS SSD, ``st1`` for Throughput Optimized HDD, ``sc1`` for Cold HDD, or ``standard`` for Magnetic volumes. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type VolumeIds: list :param VolumeIds: The volume IDs. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type MaxResults: integer :param MaxResults: The maximum number of volume results returned by ``DescribeVolumes`` in paginated output. When this parameter is used, ``DescribeVolumes`` only returns ``MaxResults`` results in a single page along with a ``NextToken`` response element. The remaining results of the initial request can be seen by sending another ``DescribeVolumes`` request with the returned ``NextToken`` value. This value can be between 5 and 500; if ``MaxResults`` is given a value larger than 500, only 500 results are returned. If this parameter is not used, then ``DescribeVolumes`` returns all results. You cannot specify this parameter and the volume IDs parameter in the same request. 
:type NextToken: string :param NextToken: The ``NextToken`` value returned from a previous paginated ``DescribeVolumes`` request where ``MaxResults`` was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the ``NextToken`` value. This value is ``null`` when there are no more results to return. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>VpcAvailable(Waiter)<block_start><def_stmt>wait self Filters:List=<none> VpcIds:List=<none> DryRun:bool=<none> NextToken:str=<none> MaxResults:int=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_vpcs` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcs>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], VpcIds=[ 'string', ], DryRun=True|False, NextToken='string', MaxResults=123, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: One or more filters. * ``cidr`` - The primary IPv4 CIDR block of the VPC. The CIDR block you specify must exactly match the VPC\'s CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, ``/28`` ). * ``cidr-block-association.cidr-block`` - An IPv4 CIDR block associated with the VPC. * ``cidr-block-association.association-id`` - The association ID for an IPv4 CIDR block associated with the VPC. * ``cidr-block-association.state`` - The state of an IPv4 CIDR block associated with the VPC. * ``dhcp-options-id`` - The ID of a set of DHCP options. * ``ipv6-cidr-block-association.ipv6-cidr-block`` - An IPv6 CIDR block associated with the VPC. * ``ipv6-cidr-block-association.association-id`` - The association ID for an IPv6 CIDR block associated with the VPC. * ``ipv6-cidr-block-association.state`` - The state of an IPv6 CIDR block associated with the VPC. * ``isDefault`` - Indicates whether the VPC is the default VPC. * ``owner-id`` - The ID of the AWS account that owns the VPC. * ``state`` - The state of the VPC (``pending`` | ``available`` ). * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``vpc-id`` - The ID of the VPC. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. 
For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type VpcIds: list :param VpcIds: One or more VPC IDs. Default: Describes all your VPCs. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type NextToken: string :param NextToken: The token for the next page of results. :type MaxResults: integer :param MaxResults: The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned ``nextToken`` value. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>VpcExists(Waiter)<block_start><def_stmt>wait self Filters:List=<none> VpcIds:List=<none> DryRun:bool=<none> NextToken:str=<none> MaxResults:int=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_vpcs` every 1 second until a successful state is reached. An error is returned after 5 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcs>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], VpcIds=[ 'string', ], DryRun=True|False, NextToken='string', MaxResults=123, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: One or more filters. * ``cidr`` - The primary IPv4 CIDR block of the VPC. The CIDR block you specify must exactly match the VPC\'s CIDR block for information to be returned for the VPC. Must contain the slash followed by one or two digits (for example, ``/28`` ). * ``cidr-block-association.cidr-block`` - An IPv4 CIDR block associated with the VPC. * ``cidr-block-association.association-id`` - The association ID for an IPv4 CIDR block associated with the VPC. * ``cidr-block-association.state`` - The state of an IPv4 CIDR block associated with the VPC. * ``dhcp-options-id`` - The ID of a set of DHCP options. * ``ipv6-cidr-block-association.ipv6-cidr-block`` - An IPv6 CIDR block associated with the VPC. * ``ipv6-cidr-block-association.association-id`` - The association ID for an IPv6 CIDR block associated with the VPC. * ``ipv6-cidr-block-association.state`` - The state of an IPv6 CIDR block associated with the VPC. * ``isDefault`` - Indicates whether the VPC is the default VPC. * ``owner-id`` - The ID of the AWS account that owns the VPC. * ``state`` - The state of the VPC (``pending`` | ``available`` ). * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. 
For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``vpc-id`` - The ID of the VPC. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type VpcIds: list :param VpcIds: One or more VPC IDs. Default: Describes all your VPCs. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type NextToken: string :param NextToken: The token for the next page of results. :type MaxResults: integer :param MaxResults: The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned ``nextToken`` value. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 1 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 5 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>VpcPeeringConnectionDeleted(Waiter)<block_start><def_stmt>wait self Filters:List=<none> DryRun:bool=<none> VpcPeeringConnectionIds:List=<none> NextToken:str=<none> MaxResults:int=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_vpc_peering_connections` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcPeeringConnections>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], DryRun=True|False, VpcPeeringConnectionIds=[ 'string', ], NextToken='string', MaxResults=123, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: One or more filters. * ``accepter-vpc-info.cidr-block`` - The IPv4 CIDR block of the accepter VPC. * ``accepter-vpc-info.owner-id`` - The AWS account ID of the owner of the accepter VPC. * ``accepter-vpc-info.vpc-id`` - The ID of the accepter VPC. * ``expiration-time`` - The expiration date and time for the VPC peering connection. * ``requester-vpc-info.cidr-block`` - The IPv4 CIDR block of the requester\'s VPC. * ``requester-vpc-info.owner-id`` - The AWS account ID of the owner of the requester VPC. * ``requester-vpc-info.vpc-id`` - The ID of the requester VPC. 
* ``status-code`` - The status of the VPC peering connection (``pending-acceptance`` | ``failed`` | ``expired`` | ``provisioning`` | ``active`` | ``deleting`` | ``deleted`` | ``rejected`` ). * ``status-message`` - A message that provides more information about the status of the VPC peering connection, if applicable. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``vpc-peering-connection-id`` - The ID of the VPC peering connection. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type VpcPeeringConnectionIds: list :param VpcPeeringConnectionIds: One or more VPC peering connection IDs. Default: Describes all your VPC peering connections. - *(string) --* :type NextToken: string :param NextToken: The token for the next page of results. :type MaxResults: integer :param MaxResults: The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned ``nextToken`` value. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>VpcPeeringConnectionExists(Waiter)<block_start><def_stmt>wait self Filters:List=<none> DryRun:bool=<none> VpcPeeringConnectionIds:List=<none> NextToken:str=<none> MaxResults:int=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_vpc_peering_connections` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcPeeringConnections>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], DryRun=True|False, VpcPeeringConnectionIds=[ 'string', ], NextToken='string', MaxResults=123, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: One or more filters. 
* ``accepter-vpc-info.cidr-block`` - The IPv4 CIDR block of the accepter VPC. * ``accepter-vpc-info.owner-id`` - The AWS account ID of the owner of the accepter VPC. * ``accepter-vpc-info.vpc-id`` - The ID of the accepter VPC. * ``expiration-time`` - The expiration date and time for the VPC peering connection. * ``requester-vpc-info.cidr-block`` - The IPv4 CIDR block of the requester\'s VPC. * ``requester-vpc-info.owner-id`` - The AWS account ID of the owner of the requester VPC. * ``requester-vpc-info.vpc-id`` - The ID of the requester VPC. * ``status-code`` - The status of the VPC peering connection (``pending-acceptance`` | ``failed`` | ``expired`` | ``provisioning`` | ``active`` | ``deleting`` | ``deleted`` | ``rejected`` ). * ``status-message`` - A message that provides more information about the status of the VPC peering connection, if applicable. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``vpc-peering-connection-id`` - The ID of the VPC peering connection. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type VpcPeeringConnectionIds: list :param VpcPeeringConnectionIds: One or more VPC peering connection IDs. Default: Describes all your VPC peering connections. - *(string) --* :type NextToken: string :param NextToken: The token for the next page of results. :type MaxResults: integer :param MaxResults: The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned ``nextToken`` value. :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>VpnConnectionAvailable(Waiter)<block_start><def_stmt>wait self Filters:List=<none> VpnConnectionIds:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_vpn_connections` every 15 seconds until a successful state is reached. 
An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnConnections>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], VpnConnectionIds=[ 'string', ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: One or more filters. * ``customer-gateway-configuration`` - The configuration information for the customer gateway. * ``customer-gateway-id`` - The ID of a customer gateway associated with the VPN connection. * ``state`` - The state of the VPN connection (``pending`` | ``available`` | ``deleting`` | ``deleted`` ). * ``option.static-routes-only`` - Indicates whether the connection has static routes only. Used for devices that do not support Border Gateway Protocol (BGP). * ``route.destination-cidr-block`` - The destination CIDR block. This corresponds to the subnet used in a customer data center. * ``bgp-asn`` - The BGP Autonomous System Number (ASN) associated with a BGP device. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``type`` - The type of VPN connection. Currently the only supported type is ``ipsec.1`` . * ``vpn-connection-id`` - The ID of the VPN connection. * ``vpn-gateway-id`` - The ID of a virtual private gateway associated with the VPN connection. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type VpnConnectionIds: list :param VpnConnectionIds: One or more VPN connection IDs. Default: Describes your VPN connections. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. 
Default: 40 :returns: None """<line_sep><pass><block_end><block_end><class_stmt>VpnConnectionDeleted(Waiter)<block_start><def_stmt>wait self Filters:List=<none> VpnConnectionIds:List=<none> DryRun:bool=<none> WaiterConfig:Dict=<none><block_start>""" Polls :py:meth:`EC2.Client.describe_vpn_connections` every 15 seconds until a successful state is reached. An error is returned after 40 failed checks. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnConnections>`_ **Request Syntax** :: waiter.wait( Filters=[ { 'Name': 'string', 'Values': [ 'string', ] }, ], VpnConnectionIds=[ 'string', ], DryRun=True|False, WaiterConfig={ 'Delay': 123, 'MaxAttempts': 123 } ) :type Filters: list :param Filters: One or more filters. * ``customer-gateway-configuration`` - The configuration information for the customer gateway. * ``customer-gateway-id`` - The ID of a customer gateway associated with the VPN connection. * ``state`` - The state of the VPN connection (``pending`` | ``available`` | ``deleting`` | ``deleted`` ). * ``option.static-routes-only`` - Indicates whether the connection has static routes only. Used for devices that do not support Border Gateway Protocol (BGP). * ``route.destination-cidr-block`` - The destination CIDR block. This corresponds to the subnet used in a customer data center. * ``bgp-asn`` - The BGP Autonomous System Number (ASN) associated with a BGP device. * ``tag`` :<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key ``Owner`` and the value ``TeamA`` , specify ``tag:Owner`` for the filter name and ``TeamA`` for the filter value. * ``tag-key`` - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. * ``type`` - The type of VPN connection. Currently the only supported type is ``ipsec.1`` . * ``vpn-connection-id`` - The ID of the VPN connection. * ``vpn-gateway-id`` - The ID of a virtual private gateway associated with the VPN connection. - *(dict) --* A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example: * DescribeAvailabilityZones * DescribeImages * DescribeInstances * DescribeKeyPairs * DescribeSecurityGroups * DescribeSnapshots * DescribeSubnets * DescribeTags * DescribeVolumes * DescribeVpcs - **Name** *(string) --* The name of the filter. Filter names are case-sensitive. - **Values** *(list) --* The filter values. Filter values are case-sensitive. - *(string) --* :type VpnConnectionIds: list :param VpnConnectionIds: One or more VPN connection IDs. Default: Describes your VPN connections. - *(string) --* :type DryRun: boolean :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is ``DryRunOperation`` . Otherwise, it is ``UnauthorizedOperation`` . :type WaiterConfig: dict :param WaiterConfig: A dictionary that provides parameters to control waiting behavior. - **Delay** *(integer) --* The amount of time in seconds to wait between attempts. 
Default: 15 - **MaxAttempts** *(integer) --* The maximum number of attempts to be made. Default: 40 :returns: None """<line_sep><pass><block_end><block_end>
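# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated stubs): these Waiter classes are
# normally obtained through a boto3 EC2 client rather than instantiated
# directly. The waiter name follows the standard boto3 convention; the volume
# ID and WaiterConfig values below are placeholders.
#
#   import boto3
#
#   ec2 = boto3.client("ec2")
#   waiter = ec2.get_waiter("volume_available")
#   waiter.wait(VolumeIds=["vol-0123456789abcdef0"],
#               WaiterConfig={"Delay": 15, "MaxAttempts": 40})
# ---------------------------------------------------------------------------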
<import_from_stmt>datetime timedelta<import_stmt>pendulum<import_stmt>pytest<import_stmt>prefect.schedules.adjustments<as>adjustments<import_stmt>prefect.schedules.filters<line_sep>@pytest.mark.parametrize("interval" [timedelta(days=1) timedelta(seconds=0) timedelta(days=-1) timedelta(microseconds=1) ] )<def_stmt>test_add interval<block_start>dt=pendulum.now()<line_sep>adjustment_fn=adjustments.add(interval)<assert_stmt>adjustment_fn(dt)<eq>dt+interval<block_end>@pytest.mark.parametrize("dt" [pendulum.datetime(2019 1 i)<for>i range(1 10)])<def_stmt>test_next_weekday dt<block_start>adjusted=adjustments.next_weekday(dt)<if_stmt>prefect.schedules.filters.is_weekday(dt)<block_start><assert_stmt>adjusted<is>dt<block_end><else_stmt><block_start><assert_stmt>adjusted<g>dt<and>adjusted.weekday()<eq>0<block_end><block_end>
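# Usage sketch (illustrative, not part of the tests): in prefect 1.x the
# adjustment functions exercised above are passed to a Schedule, which applies
# them to each candidate run time emitted by its clocks. The one-hour interval
# is an arbitrary example value.
#
#   from datetime import timedelta
#   from prefect.schedules import Schedule
#   from prefect.schedules.clocks import IntervalClock
#
#   schedule = Schedule(
#       clocks=[IntervalClock(timedelta(hours=1))],
#       adjustments=[adjustments.next_weekday],
#   )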
# coding: utf-8 # flake8: noqa """ Run the tests. $ pip install nose (optional) $ cd petstore_api-python $ nosetests -v """<import_from_future_stmt> absolute_import<import_stmt>unittest<import_stmt>petstore_api<class_stmt>TestConfiguration(unittest.TestCase)<block_start>"""Configuration unit test stubs"""<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self# reset Configuration <block_start>petstore_api.Configuration.set_default(<none>)<block_end><def_stmt>testConfiguration self# check that different instances use different dictionaries <block_start>c1=petstore_api.Configuration()<line_sep>c2=petstore_api.Configuration()<line_sep>self.assertNotEqual(id(c1.api_key) id(c2.api_key))<line_sep>self.assertNotEqual(id(c1.api_key_prefix) id(c2.api_key_prefix))<block_end><def_stmt>testDefaultConfiguration self# prepare default configuration <block_start>c1=petstore_api.Configuration(host="example.com")<line_sep>c1.debug=<true><line_sep>petstore_api.Configuration.set_default(c1)<line_sep># get default configuration c2=petstore_api.Configuration.get_default_copy()<line_sep>self.assertEqual(c2.host "example.com")<line_sep>self.assertTrue(c2.debug)<line_sep>self.assertNotEqual(id(c1.api_key) id(c2.api_key))<line_sep>self.assertNotEqual(id(c1.api_key_prefix) id(c2.api_key_prefix))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
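# Usage sketch (mirrors the behaviour asserted in testDefaultConfiguration
# above; the host value is a placeholder):
#
#   default_conf = petstore_api.Configuration(host="https://petstore.example")
#   petstore_api.Configuration.set_default(default_conf)
#   conf = petstore_api.Configuration.get_default_copy()  # copies the default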
# Copyright 2021 AIOps Squad, Riiid Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>.config PROJECT_CONFIG_FNAME<import_from_stmt>.config PIPELINE_CONFIG_FNAME<import_from_stmt>.config ProjectConfig<import_from_stmt>.config PipelineConfig<import_from_stmt>.config get_project_config<import_from_stmt>.config get_pipeline_config<line_sep>__all__=["PROJECT_CONFIG_FNAME" "PIPELINE_CONFIG_FNAME" "ProjectConfig" "PipelineConfig" "get_project_config" "get_pipeline_config" ]<line_sep>
<def_stmt>dependency_in_local_module_directory foo<block_start><return>foo<block_end>
<import_stmt>re<import_stmt>json<import_from_stmt>spytest st<line_sep>APPL_DB="APPL_DB"<line_sep>ASIC_DB="ASIC_DB"<line_sep>COUNTERS_DB="COUNTERS_DB"<line_sep>LOGLEVEL_DB="LOGLEVEL_DB"<line_sep>CONFIG_DB="CONFIG_DB"<line_sep>PFC_WD_DB="PFC_WD_DB"<line_sep>FLEX_COUNTER_DB="FLEX_COUNTER_DB"<line_sep>STATE_DB="STATE_DB"<line_sep>SNMP_OVERLAY_DB="SNMP_OVERLAY_DB"<line_sep>ERROR_DB="ERROR_DB"<line_sep>########################## TODO #################################### # read db_port_map from /var/run/redis/sonic-db/database_config.json #################################################################### db_id_map={APPL_DB:0 ASIC_DB:1 COUNTERS_DB:2 LOGLEVEL_DB:3 CONFIG_DB:4 PFC_WD_DB:5 FLEX_COUNTER_DB:5 STATE_DB:6 SNMP_OVERLAY_DB:7 ERROR_DB:8}<line_sep># Port map used for A/A+/B/B-MR db_default_port_map={APPL_DB:6379 ASIC_DB:6379 COUNTERS_DB:6379 LOGLEVEL_DB:6379 CONFIG_DB:6379 PFC_WD_DB:6379 FLEX_COUNTER_DB:6379 STATE_DB:6379 SNMP_OVERLAY_DB:6379 ERROR_DB:6379}<line_sep># Read /var/run/redis/sonic-db/database_config.json on DUT and populate db_port_map db_port_map={}<line_sep># 0 - use redis-cli # 1 - use redis-cli -p # 2 - use sonic-db-cli <def_stmt>db_cli_init dut<block_start>db_cli=st.getenv("SPYTEST_REDIS_DB_CLI_TYPE" "1")<if_stmt>db_cli<in>["1" "2"]<block_start>db_map_read(dut)<block_end><if_stmt>db_cli<in>["0" "1" "2"]<block_start><return>db_cli<block_end>output=st.show(dut 'ls /usr/local/bin/sonic-db-cli' skip_tmpl=<true>)<line_sep><return>"0"<if>re.search(r'No such file or directory' output)<else>"2"<block_end><def_stmt>db_map_read dut<block_start><global>db_port_map<line_sep>db_dict=<none><line_sep>db_json=st.config(dut "cat /var/run/redis/sonic-db/database_config.json").split("\n")<line_sep>db_json.pop()<try_stmt><block_start>db_dict=json.loads("".join(db_json))<line_sep>db_instances=db_dict.get("INSTANCES")<for_stmt>db_name,db_data db_dict.get("DATABASES").items()<block_start>db_port_map[db_name]=db_instances[db_data["instance"]].get("port")<block_end><block_end><except_stmt>Exception<block_start>db_port_map=db_default_port_map<block_end><block_end><def_stmt>_prefix dut db suffix="cli"<block_start>db_cli=st.get_dut_var(dut "redis_db_cli")<if_stmt>db<and>db<not><in>db_id_map<block_start><raise>ValueError("Unknown DB name {} in ID Map".format(db))<block_end><if_stmt>db<and>db<not><in>db_port_map<block_start><raise>ValueError("Unknown DB name {} in Port Map".format(db))<block_end><if_stmt>db_cli<eq>"2"<block_start><return>"sonic-db-{} {}".format(suffix db<or>"")<block_end><if_stmt>db_cli<eq>"1"<block_start>cmd="redis-{} -p {}".format(suffix db_port_map[db])<block_end><else_stmt><block_start>cmd="redis-{}".format(suffix)<block_end><return>"{} -n {}".format(cmd db_id_map[db])<if>db<else>cmd<block_end><def_stmt>scan dut db pattern skip_error_check=<false><block_start>cmd="{} --scan --pattern '{}'".format(_prefix(dut db) pattern)<line_sep><return>st.config(dut cmd skip_error_check=skip_error_check)<block_end><def_stmt>dump dut db pattern skip_error_check=<false><block_start>cmd="{} -k '{}' -y".format(_prefix(dut db "dump") pattern)<line_sep><return>st.config(dut cmd skip_error_check=skip_error_check)<block_end><def_stmt>build dut db cmd<block_start><return>"{} {}".format(_prefix(dut db) cmd)<block_end><def_stmt>config dut db cmd skip_error_check=<false><block_start>dev_cmd=build(dut db cmd)<line_sep><return>st.config(dut dev_cmd skip_error_check=skip_error_check)<block_end><def_stmt>show dut db cmd skip_tmpl=<false><block_start>dev_cmd=build(dut db
cmd)<line_sep><return>st.show(dut dev_cmd skip_tmpl=skip_tmpl)<block_end>
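# A hedged usage sketch (``dut`` is a spytest device handle obtained from the framework;
# the CONFIG_DB key pattern is an illustrative assumption, not part of this module):
<def_stmt>show_port_keys dut<block_start><return>scan(dut CONFIG_DB "PORT|*")<block_end>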
# coding:utf-8 <import_stmt>paddle.v2<as>paddle<line_sep># Convolutional neural network LeNet-5; builds and returns the classifier <def_stmt>convolutional_neural_network datadim type_size<block_start>image=paddle.layer.data(name="image" type=paddle.data_type.dense_vector(datadim))<line_sep># First convolution--pooling layer conv_pool_1=paddle.networks.simple_img_conv_pool(input=image filter_size=5 num_filters=20 num_channel=1 pool_size=2 pool_stride=2 act=paddle.activation.Relu())<line_sep># Second convolution--pooling layer conv_pool_2=paddle.networks.simple_img_conv_pool(input=conv_pool_1 filter_size=5 num_filters=50 num_channel=20 pool_size=2 pool_stride=2 act=paddle.activation.Relu())<line_sep># Fully connected output layer with softmax activation out=paddle.layer.fc(input=conv_pool_2 size=type_size act=paddle.activation.Softmax())<line_sep><return>out<block_end>
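# A hedged usage sketch (assumes MNIST-style 28x28 grayscale input, i.e. datadim=784,
# and 10 classes, mirroring the classic PaddlePaddle v2 recipe; illustrative only):
out=convolutional_neural_network(datadim=784 type_size=10)<line_sep>label=paddle.layer.data(name="label" type=paddle.data_type.integer_value(10))<line_sep>cost=paddle.layer.classification_cost(input=out label=label)<line_sep>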
# Bank, 3 clerks (resources).py <import_stmt>salabim<as>sim<class_stmt>CustomerGenerator(sim.Component)<block_start><def_stmt>process self<block_start><while_stmt><true><block_start>Customer()<line_sep><yield>self.hold(sim.Uniform(5 15).sample())<block_end><block_end><block_end><class_stmt>Customer(sim.Component)<block_start><def_stmt>process self<block_start><yield>self.request(clerks)<line_sep><yield>self.hold(30)<line_sep>self.release()<block_end><block_end># not really required env=sim.Environment(trace=<false>)<line_sep>CustomerGenerator()<line_sep>clerks=sim.Resource("clerks" capacity=3)<line_sep>env.run(till=50000)<line_sep>clerks.print_statistics()<line_sep>clerks.print_info()<line_sep>
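# A hedged back-of-envelope check of the parameters above: the mean inter-arrival time is
# (5+15)/2 = 10, each customer holds a clerk for 30, so the offered load is
# 30/(10*3) = 1.0 per clerk; the three clerks run at full utilization, which the
# printed resource statistics should reflect.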
# -*- coding: utf-8 -*- <import_from_stmt>django forms<import_from_stmt>django.conf settings<import_from_stmt>django.contrib.auth get_user_model<import_from_stmt>django.utils.translation ugettext_lazy<as>_<line_sep>User=get_user_model()<class_stmt>UserSuForm(forms.Form)<block_start>username_field=User.USERNAME_FIELD<line_sep>user=forms.ModelChoiceField(label=_('Users') queryset=User._default_manager.order_by(username_field) required=<true>)<line_sep># pylint: disable=W0212 use_ajax_select=<false><def_stmt>__init__ self *args **kwargs<block_start>super(UserSuForm self).__init__(*args **kwargs)<if_stmt>'ajax_select'<in>settings.INSTALLED_APPS<and>getattr(settings 'AJAX_LOOKUP_CHANNELS' <none>)<block_start><import_from_stmt>ajax_select.fields AutoCompleteSelectField<line_sep>lookup=settings.AJAX_LOOKUP_CHANNELS.get('django_su' <none>)<if_stmt>lookup<is><not><none><block_start>old_field=self.fields['user']<line_sep>self.fields['user']=AutoCompleteSelectField('django_su' required=old_field.required label=old_field.label )<line_sep>self.use_ajax_select=<true><block_end><block_end><block_end><def_stmt>get_user self<block_start><return>self.cleaned_data.get('user' <none>)<block_end><def_stmt>__str__ self<block_start><if_stmt>'formadmin'<in>settings.INSTALLED_APPS<block_start><try_stmt><block_start><import_from_stmt>formadmin.forms as_django_admin<line_sep><return>as_django_admin(self)<block_end><except_stmt>ImportError<block_start><pass><block_end><block_end><return>super(UserSuForm self).__str__()<block_end><block_end>
<import_stmt>six<import_stmt>Queue<import_stmt>socket<import_stmt>multiprocessing<import_from_stmt>time time<import_from_stmt>itertools chain<import_from_stmt>collections defaultdict<import_from_stmt>bisect bisect_left bisect_right<import_from_stmt>contextlib contextmanager<import_from_stmt>nel logging<line_sep>log=logging.getLogger()<def_stmt>get_from_module cid mod_params mod_name instantiate=<false> kwargs=<none><block_start><if_stmt>isinstance(cid six.string_types)<block_start>res=mod_params.get(cid)<if_stmt><not>res<block_start><raise>Exception('Invalid '+str(mod_name)+': '+str(cid))<block_end><if_stmt>instantiate<and><not>kwargs<block_start><return>res()<block_end><elif_stmt>instantiate<and>kwargs<block_start><return>res(**kwargs)<block_end><else_stmt><block_start><return>res<block_end><block_end><return>cid<block_end><def_stmt>byte_to_char_map byte_str encoding='utf-8'<block_start>mapping={}<line_sep>char_str=byte_str.decode(encoding)<line_sep>byte_offset,char_offset=0 0<for_stmt>char_offset,c enumerate(char_str)<block_start>mapping[byte_offset]=char_offset<line_sep>byte_offset<augadd>len(c.encode(encoding))<block_end>mapping[byte_offset]=char_offset<line_sep><return>mapping<block_end><def_stmt>group iteration key_getter value_getter<block_start>d=defaultdict(list)<for_stmt>item iteration<block_start>d[key_getter(item)].append(value_getter(item))<block_end><return>d<block_end><def_stmt>invert_grouping g<block_start>d=defaultdict(list)<for_stmt>k,items g.iteritems()<block_start><for_stmt>i items<block_start>d[i].append(k)<block_end><block_end><return>d<block_end><def_stmt>spanset_insert indicies begin end<block_start>""" Determines if a span from an index set is occupied in O(log(n)) """<line_sep>b_idx=bisect_right(indicies begin)<line_sep>e_idx=bisect_left(indicies end)<line_sep>can_insert=b_idx<eq>e_idx<and>(b_idx<eq>0<or>indicies[b_idx-1]<ne>begin)<and>(e_idx<eq>len(indicies)<or>indicies[e_idx]<ne>end)<and>b_idx%2<eq>0<if_stmt>can_insert<block_start>indicies.insert(b_idx begin)<line_sep>indicies.insert(b_idx+1 end)<block_end><return>can_insert<block_end><def_stmt>spawn_worker f<block_start><def_stmt>fun wid q_in q_out recycle_interval<block_start>job_count=0<while_stmt><true><block_start>i,x=q_in.get()<if_stmt>i<is><none><block_start><break><block_end><try_stmt><block_start>recycle_id=wid<if>job_count+1<eq>recycle_interval<else><none><line_sep>q_out.put(((i f(x)) recycle_id))<line_sep>job_count<augadd>1<if_stmt>recycle_id<ne><none><block_start><return><block_end><block_end><except_stmt>Exception<as>e<block_start>log.error("Worker function exception: %s"%e)<line_sep><raise><block_end><block_end><block_end><return>fun<block_end><def_stmt>iter_to_input_queue iteration q_in p_control<block_start>iteration_len=0<for_stmt>i,x enumerate(iteration)<block_start>q_in.put((i x))<line_sep>iteration_len<augadd>1<block_end>p_control.send(iteration_len)<line_sep>p_control.close()<block_end><class_stmt>parmapper(object)<block_start><def_stmt>__init__ self job nprocs=<none> recycle_interval=5<block_start><if_stmt>nprocs<eq><none><block_start>nprocs=multiprocessing.cpu_count()-1<block_end>self.job=job<line_sep>self.q_in=multiprocessing.Queue(1)<line_sep>self.q_out=multiprocessing.Queue(nprocs)<line_sep>self.recycle_interval=recycle_interval<line_sep>self.procs=[self.get_process(i)<for>i range(nprocs)]<block_end><def_stmt>get_process self idx<block_start><return>multiprocessing.Process(target=spawn_worker(self.job) args=(idx self.q_in self.q_out 
self.recycle_interval))<block_end><def_stmt>run_process self idx<block_start>self.procs[idx].daemon=<true><line_sep>self.procs[idx].start()<block_end><def_stmt>__enter__ self<block_start><for_stmt>i xrange(len(self.procs))<block_start>self.run_process(i)<block_end><return>self<block_end><def_stmt>recycle_worker self wid<block_start>worker=self.procs[wid]<line_sep>#log.debug('Recycling worker id=%i, pid=%i...' % (wid, worker.pid)) worker.join()<line_sep>self.procs[wid]=self.get_process(wid)<line_sep>self.run_process(wid)<block_end><def_stmt>consume self producer<block_start>worker_pipe,control_pipe=multiprocessing.Pipe(<true>)<line_sep>async_input_iterator=multiprocessing.Process(target=iter_to_input_queue args=(producer self.q_in worker_pipe))<line_sep>async_input_iterator.daemon=<true><line_sep>async_input_iterator.start()<line_sep>expected_output_count=<none><line_sep>output_count=0<while_stmt>expected_output_count<eq><none><or>expected_output_count<g>output_count<block_start><if_stmt>expected_output_count<eq><none><and>control_pipe.poll()<block_start>expected_output_count=control_pipe.recv()<line_sep>#log.debug('Producer exhausted with %i items total, %i remaining...' % (expected_output_count, expected_output_count - output_count)) <block_end><try_stmt># couldn't get this working without a busy wait <block_start>out,recycle_wid=self.q_out.get_nowait()<while_stmt><true><block_start><if_stmt>recycle_wid<ne><none><block_start>self.recycle_worker(recycle_wid)<block_end><yield>out<line_sep>output_count<augadd>1<line_sep>out,recycle_wid=self.q_out.get_nowait()<block_end><block_end><except_stmt>Queue.Empty<block_start><pass><block_end><block_end>async_input_iterator.join()<block_end><def_stmt>__exit__ self t value traceback<block_start><for_stmt>_ self.procs<block_start>self.q_in.put((<none> <none>))<block_end><for_stmt>p self.procs<block_start>p.join()<block_end><block_end><block_end># todo: kill after some timeout @contextmanager<def_stmt>tcp_socket host port<block_start>s=socket.socket(socket.AF_INET socket.SOCK_STREAM)<try_stmt><block_start>s.connect((host port))<line_sep><yield>s<block_end><finally_stmt><block_start><try_stmt><block_start>s.shutdown(socket.SHUT_RDWR)<block_end><except_stmt>socket.error<block_start><pass><block_end><except_stmt>OSError<block_start><pass><block_end><finally_stmt><block_start>s.close()<block_end><block_end><block_end><class_stmt>trie(object)<block_start><def_stmt>__init__ self<block_start>self.Children=defaultdict(trie)<line_sep>self.Matches=set()<block_end><def_stmt>insert_many self sequence entities<block_start><if_stmt>len(entities)<g>0<block_start>self._insert(sequence entities 0 <true>)<block_end><block_end><def_stmt>insert self sequence e<block_start>self._insert(sequence e 0 <false>)<block_end><def_stmt>_insert self sequence e offset multi<block_start><if_stmt>offset<l>len(sequence)<block_start>item=sequence[offset]<line_sep>self.Children[item]._insert(sequence e offset+1 multi)<block_end><else_stmt><block_start><if_stmt>multi<block_start><for_stmt>entity e<block_start>self.Matches.add((entity offset))<block_end><block_end><else_stmt><block_start>self.Matches.add((e offset))<block_end><block_end><block_end><def_stmt>iter_matches self<block_start><for_stmt>e self.Matches<block_start><yield>e<block_end><block_end><def_stmt>scan self seq<block_start><for_stmt>i xrange(0 len(seq))<block_start><for_stmt>m self.match(seq i <true> <true>)<block_start><yield>m<block_end><block_end><block_end><def_stmt>match self seq offset=0 subsequences=<false> 
inorder=<true># if we are yielding subsequence matches, or the sequence # is complete return all entities for the current node <block_start>current=[(e (offset-length offset))<for>e,length self.iter_matches()]<if>subsequences<or>offset<eq>len(seq)<else><none><line_sep># iteration for the next items in the sequence pending=<none><if_stmt>seq<and>offset<l>len(seq)<block_start>token=seq[offset]<if_stmt>token<in>self.Children<block_start>pending=self.Children[token].match(seq offset+1 subsequences inorder)<block_end><block_end><if_stmt>current<and>pending<block_start><return>chain(current pending)<if>inorder<else>chain(pending current)<block_end><return>current<or>pending<or>[]<block_end><block_end>
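# A hedged usage sketch for the gazetteer trie above (the surface form and token
# sequence are illustrative assumptions): insert an entity under a token sequence,
# then scan text for (entity, (start, end)) matches over token offsets.
gaz=trie()<line_sep>gaz.insert(["new" "york"] "LOC:New_York")<line_sep><assert_stmt>list(gaz.scan(["i" "love" "new" "york"]))<eq>[("LOC:New_York" (2 4))]<line_sep>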
# coding=utf-8 # Copyright 2021 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for the Policy."""<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>jax.test_util<import_from_stmt>ott.core sinkhorn<import_from_stmt>ott.geometry costs<import_from_stmt>ott.geometry geometry<import_from_stmt>ott.geometry pointcloud<class_stmt>SinkhornTest(jax.test_util.JaxTestCase)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.rng=jax.random.PRNGKey(0)<line_sep>self.dim=4<line_sep>self.n=68<line_sep>self.m=123<line_sep>self.rng,*rngs=jax.random.split(self.rng 5)<line_sep>self.x=jax.random.uniform(rngs[0] (self.n self.dim))<line_sep>self.y=jax.random.uniform(rngs[1] (self.m self.dim))<line_sep>a=jax.random.uniform(rngs[2] (self.n ))<line_sep>b=jax.random.uniform(rngs[3] (self.m ))<line_sep># adding zero weights to test proper handling a=a.at[0].set(0)<line_sep>b=b.at[3].set(0)<line_sep>self.a=a/jnp.sum(a)<line_sep>self.b=b/jnp.sum(b)<block_end>@parameterized.named_parameters(dict(testcase_name='lse-Leh-mom' lse_mode=<true> momentum=1.0 chg_momentum_from=29 inner_iterations=10 norm_error=1) dict(testcase_name='lse-small-mom' lse_mode=<true> momentum=1.01 chg_momentum_from=0 inner_iterations=10 norm_error=1) dict(testcase_name='lse-high-mom' lse_mode=<true> momentum=1.5 chg_momentum_from=0 inner_iterations=10 norm_error=1) dict(testcase_name='scal-Leh-mom' lse_mode=<false> momentum=1.01 chg_momentum_from=30 inner_iterations=10 norm_error=1) dict(testcase_name='scal-no-mom' lse_mode=<false> momentum=1.0 chg_momentum_from=0 inner_iterations=10 norm_error=1 ) dict(testcase_name='scal-high-mom' lse_mode=<false> momentum=1.5 chg_momentum_from=0 inner_iterations=10 norm_error=1 ) dict(testcase_name='lse-Leh-1' lse_mode=<true> momentum=1.0 chg_momentum_from=60 inner_iterations=1 norm_error=2) dict(testcase_name='lse-Leh-13' lse_mode=<true> momentum=1.0 chg_momentum_from=40 inner_iterations=13 norm_error=3 ) dict(testcase_name='lse-Leh-24' lse_mode=<true> momentum=1.0 chg_momentum_from=12 inner_iterations=24 norm_error=4 ))<def_stmt>test_euclidean_point_cloud self lse_mode momentum chg_momentum_from inner_iterations norm_error<block_start>"""Two point clouds, tested with various parameters."""<line_sep>threshold=1e-3<line_sep>geom=pointcloud.PointCloud(self.x self.y epsilon=0.1)<line_sep>errors=sinkhorn.sinkhorn(geom a=self.a b=self.b threshold=threshold momentum=momentum chg_momentum_from=chg_momentum_from inner_iterations=inner_iterations norm_error=norm_error lse_mode=lse_mode).errors<line_sep>err=errors[errors<g>-1][-1]<line_sep>self.assertGreater(threshold err)<block_end><def_stmt>test_autoepsilon self<block_start>"""Check that with auto-epsilon, dual potentials scale."""<line_sep>scale=2.77<line_sep># First geom specifies explicitly relative_epsilon to be True. This is not # needed in principle, but introduced here to test logic. 
geom_1=pointcloud.PointCloud(self.x self.y relative_epsilon=<true>)<line_sep># jit first with jit inside sinkhorn call. f_1=sinkhorn.sinkhorn(geom_1 a=self.a b=self.b tau_a=.99 tau_b=.97 jit=<true>).f<line_sep># Second geom does not provide whether epsilon is relative. geom_2=pointcloud.PointCloud(scale<times>self.x scale<times>self.y)<line_sep># jit now with jit outside sinkhorn call. compute_f=jax.jit(<lambda>g a b:sinkhorn.sinkhorn(g a b tau_a=.99 tau_b=.97).f)<line_sep>f_2=compute_f(geom_2 self.a self.b)<line_sep># Ensure epsilon and optimal f's are a scale^2 apart (^2 comes from ^2 cost) self.assertAllClose(geom_1.epsilon<times>scale<power>2 geom_2.epsilon rtol=1e-3 atol=1e-3)<line_sep>self.assertAllClose(geom_1._epsilon.at(2)<times>scale<power>2 geom_2._epsilon.at(2) rtol=1e-3 atol=1e-3)<line_sep>self.assertAllClose(f_1<times>scale<power>2 f_2 rtol=1e-3 atol=1e-3)<block_end>@parameterized.product(lse_mode=[<true> <false>] init=[2 5] decay=[.8 .9] tau_a=[1.0 .93] tau_b=[1.0 .91])<def_stmt>test_autoepsilon_with_decay self lse_mode init decay tau_a tau_b<block_start>"""Check that variations in init/decay work, and result in same solution."""<line_sep>geom=pointcloud.PointCloud(self.x self.y init=init decay=decay)<line_sep>out_1=sinkhorn.sinkhorn(geom a=self.a b=self.b tau_a=tau_a tau_b=tau_b jit=<true> threshold=1e-5)<line_sep>geom=pointcloud.PointCloud(self.x self.y)<line_sep>out_2=sinkhorn.sinkhorn(geom a=self.a b=self.b tau_a=tau_a tau_b=tau_b jit=<true> threshold=1e-5)<line_sep># recenter if problem is balanced, since in that case solution is only # valid up to additive constant. unb=(tau_a<l>1.0<or>tau_b<l>1.0)<line_sep>self.assertAllClose(out_1.f<if>unb<else>out_1.f-jnp.mean(out_1.f[jnp.isfinite(out_1.f)]) out_2.f<if>unb<else>out_2.f-jnp.mean(out_2.f[jnp.isfinite(out_2.f)]) rtol=1e-4 atol=1e-4)<block_end><def_stmt>test_euclidean_point_cloud_min_iter self<block_start>"""Testing the min_iterations parameter."""<line_sep>threshold=1e-3<line_sep>geom=pointcloud.PointCloud(self.x self.y epsilon=0.1)<line_sep>errors=sinkhorn.sinkhorn(geom a=self.a b=self.b threshold=threshold min_iterations=34 implicit_differentiation=<false>).errors<line_sep>err=errors[jnp.logical_and(errors<g>-1 jnp.isfinite(errors))][-1]<line_sep>self.assertGreater(threshold err)<line_sep>self.assertEqual(jnp.inf errors[0])<line_sep>self.assertEqual(jnp.inf errors[1])<line_sep>self.assertEqual(jnp.inf errors[2])<line_sep>self.assertGreater(errors[3] 0)<block_end><def_stmt>test_geom_vs_point_cloud self<block_start>"""Two point clouds vs. simple cost_matrix execution of sinkhorn."""<line_sep>geom_1=pointcloud.PointCloud(self.x self.y)<line_sep>geom_2=geometry.Geometry(geom_1.cost_matrix)<line_sep>f_1=sinkhorn.sinkhorn(geom_1 a=self.a b=self.b).f<line_sep>f_2=sinkhorn.sinkhorn(geom_2 a=self.a b=self.b).f<line_sep># recentering to remove ambiguity on equality up to additive constant.
f_1<augsub>jnp.mean(f_1[jnp.isfinite(f_1)])<line_sep>f_2<augsub>jnp.mean(f_2[jnp.isfinite(f_2)])<line_sep>self.assertAllClose(f_1 f_2)<block_end>@parameterized.parameters([<true>] [<false>])<def_stmt>test_euclidean_point_cloud_parallel_weights self lse_mode<block_start>"""Two point clouds, parallel execution for batched histograms."""<line_sep>self.rng,*rngs=jax.random.split(self.rng 2)<line_sep>batch=4<line_sep>a=jax.random.uniform(rngs[0] (batch self.n))<line_sep>b=jax.random.uniform(rngs[0] (batch self.m))<line_sep>a=a/jnp.sum(a axis=1)[: jnp.newaxis]<line_sep>b=b/jnp.sum(b axis=1)[: jnp.newaxis]<line_sep>threshold=1e-3<line_sep>geom=pointcloud.PointCloud(self.x self.y epsilon=0.1 online=<true>)<line_sep>errors=sinkhorn.sinkhorn(geom a=self.a b=self.b threshold=threshold lse_mode=lse_mode).errors<line_sep>err=errors[errors<g>-1][-1]<line_sep>self.assertGreater(jnp.min(threshold-err) 0)<block_end>@parameterized.parameters([<true>] [<false>])<def_stmt>test_online_euclidean_point_cloud self lse_mode<block_start>"""Testing the online way to handle geometry."""<line_sep>threshold=1e-3<line_sep>geom=pointcloud.PointCloud(self.x self.y epsilon=0.1 online=<true>)<line_sep>errors=sinkhorn.sinkhorn(geom a=self.a b=self.b threshold=threshold lse_mode=lse_mode).errors<line_sep>err=errors[errors<g>-1][-1]<line_sep>self.assertGreater(threshold err)<block_end>@parameterized.parameters([<true>] [<false>])<def_stmt>test_online_vs_batch_euclidean_point_cloud self lse_mode<block_start>"""Comparing online vs batch geometry."""<line_sep>threshold=1e-3<line_sep>eps=0.1<line_sep>online_geom=pointcloud.PointCloud(self.x self.y epsilon=eps online=<true>)<line_sep>online_geom_euc=pointcloud.PointCloud(self.x self.y cost_fn=costs.Euclidean() epsilon=eps online=<true>)<line_sep>batch_geom=pointcloud.PointCloud(self.x self.y epsilon=eps)<line_sep>batch_geom_euc=pointcloud.PointCloud(self.x self.y cost_fn=costs.Euclidean() epsilon=eps)<line_sep>out_online=sinkhorn.sinkhorn(online_geom a=self.a b=self.b threshold=threshold lse_mode=lse_mode)<line_sep>out_batch=sinkhorn.sinkhorn(batch_geom a=self.a b=self.b threshold=threshold lse_mode=lse_mode)<line_sep>out_online_euc=sinkhorn.sinkhorn(online_geom_euc a=self.a b=self.b threshold=threshold lse_mode=lse_mode)<line_sep>out_batch_euc=sinkhorn.sinkhorn(batch_geom_euc a=self.a b=self.b threshold=threshold lse_mode=lse_mode)<line_sep># Checks regularized transport costs match. 
self.assertAllClose(out_online.reg_ot_cost out_batch.reg_ot_cost)<line_sep># check regularized transport matrices match self.assertAllClose(online_geom.transport_from_potentials(out_online.f out_online.g) batch_geom.transport_from_potentials(out_batch.f out_batch.g))<line_sep>self.assertAllClose(online_geom_euc.transport_from_potentials(out_online_euc.f out_online_euc.g) batch_geom_euc.transport_from_potentials(out_batch_euc.f out_batch_euc.g))<line_sep>self.assertAllClose(batch_geom.transport_from_potentials(out_batch.f out_batch.g) batch_geom_euc.transport_from_potentials(out_batch_euc.f out_batch_euc.g))<block_end><def_stmt>test_apply_transport_geometry_from_potentials self<block_start>"""Applying transport matrix P on vector without instantiating P."""<line_sep>n,m,d=160 230 6<line_sep>keys=jax.random.split(self.rng 6)<line_sep>x=jax.random.uniform(keys[0] (n d))<line_sep>y=jax.random.uniform(keys[1] (m d))<line_sep>a=jax.random.uniform(keys[2] (n ))<line_sep>b=jax.random.uniform(keys[3] (m ))<line_sep>a=a/jnp.sum(a)<line_sep>b=b/jnp.sum(b)<line_sep>transport_t_vec_a=[<none> <none> <none> <none>]<line_sep>transport_vec_b=[<none> <none> <none> <none>]<line_sep>batch_b=8<line_sep>vec_a=jax.random.normal(keys[4] (n ))<line_sep>vec_b=jax.random.normal(keys[5] (batch_b m))<line_sep># test with lse_mode and online = True / False <for_stmt>j,lse_mode enumerate([<true> <false>])<block_start><for_stmt>i,online enumerate([<true> <false>])<block_start>geom=pointcloud.PointCloud(x y online=online epsilon=0.2)<line_sep>sink=sinkhorn.sinkhorn(geom a b lse_mode=lse_mode)<line_sep>transport_t_vec_a[i+2<times>j]=geom.apply_transport_from_potentials(sink.f sink.g vec_a axis=0)<line_sep>transport_vec_b[i+2<times>j]=geom.apply_transport_from_potentials(sink.f sink.g vec_b axis=1)<line_sep>transport=geom.transport_from_potentials(sink.f sink.g)<line_sep>self.assertAllClose(transport_t_vec_a[i+2<times>j] jnp.dot(transport.T vec_a).T rtol=1e-3 atol=1e-3)<line_sep>self.assertAllClose(transport_vec_b[i+2<times>j] jnp.dot(transport vec_b.T).T rtol=1e-3 atol=1e-3)<block_end><block_end><for_stmt>i range(4)<block_start>self.assertAllClose(transport_vec_b[i] transport_vec_b[0] rtol=1e-3 atol=1e-3)<line_sep>self.assertAllClose(transport_t_vec_a[i] transport_t_vec_a[0] rtol=1e-3 atol=1e-3)<block_end><block_end><def_stmt>test_apply_transport_geometry_from_scalings self<block_start>"""Applying transport matrix P on vector without instantiating P."""<line_sep>n,m,d=160 230 6<line_sep>keys=jax.random.split(self.rng 6)<line_sep>x=jax.random.uniform(keys[0] (n d))<line_sep>y=jax.random.uniform(keys[1] (m d))<line_sep>a=jax.random.uniform(keys[2] (n ))<line_sep>b=jax.random.uniform(keys[3] (m ))<line_sep>a=a/jnp.sum(a)<line_sep>b=b/jnp.sum(b)<line_sep>transport_t_vec_a=[<none> <none> <none> <none>]<line_sep>transport_vec_b=[<none> <none> <none> <none>]<line_sep>batch_b=8<line_sep>vec_a=jax.random.normal(keys[4] (n ))<line_sep>vec_b=jax.random.normal(keys[5] (batch_b m))<line_sep># test with lse_mode and online = True / False <for_stmt>j,lse_mode enumerate([<true> <false>])<block_start><for_stmt>i,online enumerate([<true> <false>])<block_start>geom=pointcloud.PointCloud(x y online=online epsilon=0.2)<line_sep>sink=sinkhorn.sinkhorn(geom a b lse_mode=lse_mode)<line_sep>u=geom.scaling_from_potential(sink.f)<line_sep>v=geom.scaling_from_potential(sink.g)<line_sep>transport_t_vec_a[i+2<times>j]=geom.apply_transport_from_scalings(u v vec_a axis=0)<line_sep>transport_vec_b[i+2<times>j]=geom.apply_transport_from_scalings(u v vec_b 
axis=1)<line_sep>transport=geom.transport_from_scalings(u v)<line_sep>self.assertAllClose(transport_t_vec_a[i+2<times>j] jnp.dot(transport.T vec_a).T rtol=1e-3 atol=1e-3)<line_sep>self.assertAllClose(transport_vec_b[i+2<times>j] jnp.dot(transport vec_b.T).T rtol=1e-3 atol=1e-3)<line_sep>self.assertIsNot(jnp.any(jnp.isnan(transport_t_vec_a[i+2<times>j])) <true>)<block_end><block_end><for_stmt>i range(4)<block_start>self.assertAllClose(transport_vec_b[i] transport_vec_b[0] rtol=1e-3 atol=1e-3)<line_sep>self.assertAllClose(transport_t_vec_a[i] transport_t_vec_a[0] rtol=1e-3 atol=1e-3)<block_end><block_end>@parameterized.parameters([<true>] [<false>])<def_stmt>test_restart self lse_mode<block_start>"""Two point clouds, tested with various parameters."""<line_sep>threshold=1e-4<line_sep>geom=pointcloud.PointCloud(self.x self.y epsilon=0.01)<line_sep>out=sinkhorn.sinkhorn(geom a=self.a b=self.b threshold=threshold lse_mode=lse_mode inner_iterations=1)<line_sep>errors=out.errors<line_sep>err=errors[errors<g>-1][-1]<line_sep>self.assertGreater(threshold err)<line_sep># recover solution from previous and ensure faster convergence. <if_stmt>lse_mode<block_start>init_dual_a,init_dual_b=out.f out.g<block_end><else_stmt><block_start>init_dual_a,init_dual_b=(geom.scaling_from_potential(out.f) geom.scaling_from_potential(out.g))<block_end>out_restarted=sinkhorn.sinkhorn(geom a=self.a b=self.b threshold=threshold lse_mode=lse_mode init_dual_a=init_dual_a init_dual_b=init_dual_b inner_iterations=1)<line_sep>errors_restarted=out_restarted.errors<line_sep>err_restarted=errors_restarted[errors_restarted<g>-1][-1]<line_sep>self.assertGreater(threshold err_restarted)<line_sep>num_iter_restarted=jnp.sum(errors_restarted<g>-1)<line_sep># check we can only improve on error self.assertGreater(err err_restarted)<line_sep># check first error in restart does at least as well as previous best self.assertGreater(err errors_restarted[0])<line_sep># check only one iteration suffices when restarting with same data. self.assertEqual(num_iter_restarted 1)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
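# A minimal, hedged sketch distilled from the tests above (sizes and epsilon are
# illustrative; weights default to uniform when a and b are omitted): solve entropic OT
# between two point clouds and read off the regularized transport cost.
rng=jax.random.PRNGKey(1)<line_sep>x=jax.random.uniform(rng (16 4))<line_sep>y=jax.random.uniform(rng (19 4))<line_sep>out=sinkhorn.sinkhorn(pointcloud.PointCloud(x y epsilon=0.1))<line_sep>print(out.reg_ot_cost)<line_sep>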
<import_from_stmt>typing List<import_from_stmt>hypothesis given<import_from_stmt>hypothesis strategies<as>st<import_from_stmt>hypothesis.extra.numpy arrays<import_stmt>numpy<as>np<import_from_stmt>numpy ndarray<import_from_stmt>numpy.testing assert_almost_equal assert_array_equal<import_from_stmt>scvelo.core clipped_log invert prod_sum sum<class_stmt>TestClippedLog<block_start>@given(a=arrays(float shape=st.integers(min_value=1 max_value=100) elements=st.floats(min_value=-1e3 max_value=1e3 allow_infinity=<false> allow_nan=<false>) ) bounds=st.lists(st.floats(min_value=0 max_value=100 allow_infinity=<false> allow_nan=<false>) min_size=2 max_size=2 unique=<true> ) eps=st.floats(min_value=1e-6 max_value=1 allow_infinity=<false> allow_nan=<false>) )<def_stmt>test_flat_arrays self a:ndarray bounds:List[float] eps:float<block_start>lb=min(bounds)<line_sep>ub=max(bounds)+2<times>eps<line_sep>a_logged=clipped_log(a lb=lb ub=ub eps=eps)<assert_stmt>a_logged.shape<eq>a.shape<if_stmt>(a<le>lb).any()<block_start>assert_almost_equal(np.abs(a_logged-np.log(lb+eps)).min() 0)<block_end><else_stmt><block_start><assert_stmt>(a_logged<ge>np.log(lb+eps)).all()<block_end><if_stmt>(a<ge>ub).any()<block_start>assert_almost_equal(np.abs(a_logged-np.log(ub-eps)).min() 0)<block_end><else_stmt><block_start><assert_stmt>(a_logged<le>np.log(ub-eps)).all()<block_end><block_end>@given(a=arrays(float shape=st.tuples(st.integers(min_value=1 max_value=100) st.integers(min_value=1 max_value=100) ) elements=st.floats(min_value=-1e3 max_value=1e3 allow_infinity=<false> allow_nan=<false>) ) bounds=st.lists(st.floats(min_value=0 max_value=100 allow_infinity=<false> allow_nan=<false>) min_size=2 max_size=2 unique=<true> ) eps=st.floats(min_value=1e-6 max_value=1 allow_infinity=<false> allow_nan=<false>) )<def_stmt>test_2d_arrays self a:ndarray bounds:List[float] eps:float<block_start>lb=min(bounds)<line_sep>ub=max(bounds)+2<times>eps<line_sep>a_logged=clipped_log(a lb=lb ub=ub eps=eps)<assert_stmt>a_logged.shape<eq>a.shape<if_stmt>(a<le>lb).any()<block_start>assert_almost_equal(np.abs(a_logged-np.log(lb+eps)).min() 0)<block_end><else_stmt><block_start><assert_stmt>(a_logged<ge>np.log(lb+eps)).all()<block_end><if_stmt>(a<ge>ub).any()<block_start>assert_almost_equal(np.abs(a_logged-np.log(ub-eps)).min() 0)<block_end><else_stmt><block_start><assert_stmt>(a_logged<le>np.log(ub-eps)).all()<block_end><block_end><block_end><class_stmt>TestInvert<block_start>@given(a=arrays(float shape=st.integers(min_value=1 max_value=100) elements=st.floats(max_value=1e3 allow_infinity=<false> allow_nan=<false>) ))<def_stmt>test_flat_arrays self a:ndarray<block_start>a_inv=invert(a)<if_stmt>a[a<ne>0].size<eq>0<block_start><assert_stmt>a_inv[a<ne>0].size<eq>0<block_end><else_stmt><block_start>assert_array_equal(a_inv[a<ne>0] 1/a[a<ne>0])<block_end><if_stmt>0<in>a<block_start><assert_stmt>np.isnan(a_inv[a<eq>0]).all()<block_end><else_stmt><block_start><assert_stmt>set(a_inv[a<eq>0])<eq>set()<block_end><block_end>@given(a=arrays(float shape=st.tuples(st.integers(min_value=1 max_value=100) st.integers(min_value=1 max_value=100) ) elements=st.floats(max_value=1e3 allow_infinity=<false> allow_nan=<false>) ))<def_stmt>test_2d_arrays self a:ndarray<block_start>a_inv=invert(a)<if_stmt>a[a<ne>0].size<eq>0<block_start><assert_stmt>a_inv[a<ne>0].size<eq>0<block_end><else_stmt><block_start>assert_array_equal(a_inv[a<ne>0] 
1/a[a<ne>0])<block_end><if_stmt>0<in>a<block_start><assert_stmt>np.isnan(a_inv[a<eq>0]).all()<block_end><else_stmt><block_start><assert_stmt>set(a_inv[a<eq>0])<eq>set()<block_end><block_end><block_end># TODO: Extend test to generate sparse inputs as well # TODO: Make test to generate two different arrays a1, a2 # TODO: Check why tests fail with assert_almost_equal <class_stmt>TestProdSum<block_start>@given(a=arrays(float shape=st.integers(min_value=1 max_value=100) elements=st.floats(max_value=1e3 allow_infinity=<false> allow_nan=<false>) ) axis=st.integers(min_value=0 max_value=1) )<def_stmt>test_flat_array self a:ndarray axis:int<block_start><assert_stmt>np.allclose((a<times>a).sum(axis=0) prod_sum(a a axis=axis))<block_end>@given(a=arrays(float shape=st.tuples(st.integers(min_value=1 max_value=100) st.integers(min_value=1 max_value=100) ) elements=st.floats(max_value=1e3 allow_infinity=<false> allow_nan=<false>) ) axis=st.integers(min_value=0 max_value=1) )<def_stmt>test_2d_array self a:ndarray axis:int<block_start><assert_stmt>np.allclose((a<times>a).sum(axis=axis) prod_sum(a a axis=axis))<block_end><block_end># TODO: Extend test to generate sparse inputs as well <class_stmt>TestSum<block_start>@given(a=arrays(float shape=st.integers(min_value=1 max_value=100) elements=st.floats(max_value=1e3 allow_infinity=<false> allow_nan=<false>) ) )<def_stmt>test_flat_arrays self a:ndarray<block_start>a_summed=sum(a=a axis=0)<line_sep>assert_array_equal(a_summed a.sum(axis=0))<block_end>@given(a=arrays(float shape=st.tuples(st.integers(min_value=1 max_value=100) st.integers(min_value=1 max_value=100) ) elements=st.floats(max_value=1e3 allow_infinity=<false> allow_nan=<false>) ) axis=st.integers(min_value=0 max_value=1) )<def_stmt>test_2d_arrays self a:ndarray axis:int<block_start>a_summed=sum(a=a axis=axis)<if_stmt>a.ndim<eq>1<block_start>axis=0<block_end>assert_array_equal(a_summed a.sum(axis=axis))<block_end><block_end>
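# A hedged illustration grounded in the properties asserted above: invert takes
# elementwise reciprocals and maps zeros to NaN.
vals=invert(np.array([2.0 0.0 4.0]))<line_sep><assert_stmt>vals[0]<eq>0.5<and>np.isnan(vals[1])<and>vals[2]<eq>0.25<line_sep>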
# Generated by Django 2.2.14 on 2020-07-17 12:04 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("django_apscheduler" "0006_remove_djangojob_name") ]<line_sep>operations=[migrations.AlterField(model_name="djangojobexecution" name="id" field=models.BigAutoField(help_text="Unique ID for this job execution." primary_key=<true> serialize=<false> ) ) ]<block_end>
<import_from_stmt>keras.layers Input Dense<import_from_stmt>keras.models Model<import_from_stmt>keras.datasets mnist<import_from_stmt>keras backend<as>K<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_stmt>pickle<line_sep># Deep Autoencoder features_path='deep_autoe_features.pickle'<line_sep>labels_path='deep_autoe_labels.pickle'<line_sep># this is the size of our encoded representations encoding_dim=32# 32 floats -> compression factor 24.5, assuming the input is 784 floats # this is our input placeholder; 784 = 28 x 28 input_img=Input(shape=(784 ))<line_sep>my_epochs=100<line_sep># "encoded" is the encoded representation of the inputs encoded=Dense(encoding_dim<times>4 activation='relu')(input_img)<line_sep>encoded=Dense(encoding_dim<times>2 activation='relu')(encoded)<line_sep>encoded=Dense(encoding_dim activation='relu')(encoded)<line_sep># "decoded" is the lossy reconstruction of the input decoded=Dense(encoding_dim<times>2 activation='relu')(encoded)<line_sep>decoded=Dense(encoding_dim<times>4 activation='relu')(decoded)<line_sep>decoded=Dense(784 activation='sigmoid')(decoded)<line_sep># this model maps an input to its reconstruction autoencoder=Model(input_img decoded)<line_sep># Separate Encoder model # this model maps an input to its encoded representation encoder=Model(input_img encoded)<line_sep># Separate Decoder model # create a placeholder for an encoded (32-dimensional) input encoded_input=Input(shape=(encoding_dim ))<line_sep># retrieve the layers of the autoencoder model decoder_layer1=autoencoder.layers[-3]<line_sep>decoder_layer2=autoencoder.layers[-2]<line_sep>decoder_layer3=autoencoder.layers[-1]<line_sep># create the decoder model decoder=Model(encoded_input decoder_layer3(decoder_layer2(decoder_layer1(encoded_input))))<line_sep># Train to reconstruct MNIST digits # configure model to use a per-pixel binary crossentropy loss, and the Adadelta optimizer autoencoder.compile(optimizer='adadelta' loss='binary_crossentropy')<line_sep># prepare input data (x_train _),(x_test y_test)=mnist.load_data()<line_sep># normalize all values between 0 and 1 and flatten the 28x28 images into vectors of size 784 x_train=x_train.astype('float32')/255.<line_sep>x_test=x_test.astype('float32')/255.<line_sep>x_train=x_train.reshape((len(x_train) np.prod(x_train.shape[1:])))<line_sep>x_test=x_test.reshape((len(x_test) np.prod(x_test.shape[1:])))<line_sep>print(x_train.shape)<line_sep>print(x_test.shape)<line_sep># Train autoencoder for 100 epochs autoencoder.fit(x_train x_train epochs=my_epochs batch_size=256 shuffle=<true> validation_data=(x_test x_test) verbose=2)<line_sep># after 100 epochs the autoencoder seems to reach a stable train/test loss value # Visualize the reconstructed encoded representations # encode and decode some digits # note that we take them from the *test* set encoded_imgs=encoder.predict(x_test)<line_sep>decoded_imgs=decoder.predict(encoded_imgs)<line_sep># save latent space features 32-d vector pickle.dump(encoded_imgs open(features_path 'wb'))<line_sep>pickle.dump(y_test open(labels_path 'wb'))<line_sep>n=10# how many digits we will display plt.figure(figsize=(10 2) dpi=100)<for_stmt>i range(n)# display original <block_start>ax=plt.subplot(2 n i+1)<line_sep>plt.imshow(x_test[i].reshape(28 28))<line_sep>plt.gray()<line_sep>ax.set_axis_off()<line_sep># display reconstruction ax=plt.subplot(2 n i+n+1)<line_sep>plt.imshow(decoded_imgs[i].reshape(28
28))<line_sep>plt.gray()<line_sep>ax.set_axis_off()<block_end>plt.show()<line_sep>K.clear_session()<line_sep>
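# A hedged follow-up sketch (paths match the pickle files written above; the shapes
# assume the standard 10,000-image MNIST test split): reload the saved 32-d latent
# features and labels for downstream use, e.g. a quick classifier or t-SNE plot.
feats=pickle.load(open(features_path 'rb'))<line_sep>labs=pickle.load(open(labels_path 'rb'))<line_sep><assert_stmt>feats.shape<eq>(10000 32)<and>labs.shape<eq>(10000 )<line_sep>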
print('hello')<line_sep>print('how are you')<line_sep>print(2+4<times>2)<line_sep>
[x<for>x s].copy()<line_sep>