<import_stmt>sys os<line_sep>sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))<import_from_stmt>sportsipy.ncaab.conferences Conferences<import_from_stmt>sportsipy.ncaab.rankings Rankings<import_from_stmt>sportsipy.ncaab.teams Teams<for_stmt>team Teams()<block_start>print(team.name)<for_stmt>player team.roster.players<block_start>print(player.name.encode('utf-8'))<block_end><for_stmt>game team.schedule<block_start>print(game.dataframe)<line_sep>print(game.dataframe_extended)<block_end><block_end>conferences=Conferences()<line_sep>print(conferences.conferences)<line_sep>print(conferences.team_conference)<line_sep>rankings=Rankings()<line_sep>print(rankings.current)<line_sep>print(rankings.current_extended)<line_sep>print(rankings.complete)<line_sep>
<import_from_stmt>sp_api.api FbaInboundEligibility<def_stmt>test_inbound_eligibility <block_start>res=FbaInboundEligibility().get_item_eligibility_preview(asin='TEST_CASE_200' program="INBOUND")<assert_stmt>res.payload<is><not><none><block_end>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Copyright 2013 California Institute of Technology. ALL RIGHTS RESERVED. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # United States Government Sponsorship acknowledged. This software is subject to # U.S. export control laws and regulations and has been classified as 'EAR99 NLR' # (No [Export] License Required except when exporting to an embargoed country, # end user, or in support of a prohibited end use). By downloading this software, # the user agrees to comply with all applicable U.S. export laws and regulations. # The user has the responsibility to obtain export licenses, or other export # authority as may be required before exporting this software to any 'EAR99' # embargoed foreign country or citizen of those countries. # # Authors: <NAME>, <NAME> #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Comment: Adapted from InsarProc/runResamp_image.py <import_stmt>os<import_stmt>logging<import_stmt>isceobj<import_stmt>stdproc<import_from_stmt>iscesys.ImageUtil.ImageUtil ImageUtil<as>IU<line_sep>logger=logging.getLogger('isce.isceProc.runResamp_image')<def_stmt>runResamp_image self<block_start>refPol=self._isce.refPol<line_sep>stdWriter=self._stdWriter<line_sep>dopplerCentroid=self._isce.dopplerCentroid<line_sep>looks=self._isce.numberLooks<line_sep>numFitCoeff=self._isce.numberFitCoefficients<line_sep>offsetImageName=self._isce.offsetImageName<line_sep>pixelSpacing=self._isce.slantRangePixelSpacing<line_sep>lines=self._isce.numberResampLines<for_stmt>sceneid1,sceneid2 self._isce.pairsToCoreg<block_start>pair=(sceneid1 sceneid2)<line_sep>imageSlc1=self._isce.slcImages[sceneid1][refPol]<line_sep>frame1=self._isce.frames[sceneid1][refPol]<line_sep>instrument=frame1.getInstrument()<line_sep>offsetField=self._isce.refinedOffsetFields[pair]#offsetField is the same for all pols imageSlc2=self._isce.slcImages[sceneid2][refPol]<line_sep>catalog=isceobj.Catalog.createCatalog(self._isce.procDoc.name)<line_sep>sid=self._isce.formatname(pair)<line_sep>offsetFilename=os.path.join(self.getoutputdir(sceneid1 sceneid2) self._isce.formatname(pair ext=offsetImageName))<line_sep>imageAz,imageRn=run(imageSlc1 imageSlc2 offsetField instrument dopplerCentroid looks lines numFitCoeff pixelSpacing offsetFilename stdWriter catalog=catalog sceneid=sid)<line_sep>self._isce.offsetAzimuthImages[pair]=imageAz<line_sep>self._isce.offsetRangeImages[pair]=imageRn<block_end><block_end><def_stmt>run imageSlc1 imageSlc2 offsetField instrument dopplerCentroid looks lines numFitCoeff pixelSpacing offsetFilename stdWriter catalog=<none> sceneid='NO_ID'<block_start>widthSlc=max(imageSlc1.getWidth() imageSlc2.getWidth())<line_sep>dopplerCoeff=dopplerCentroid.getDopplerCoefficients(inHz=<false>)<line_sep>path,filename=os.path.split(offsetFilename)<line_sep>offsetAz=os.path.join(path 'azimuth_'+filename)<line_sep>offsetRn=os.path.join(path 
'range_'+filename)<line_sep>widthOffset=int(widthSlc/looks)<line_sep>imageAz=isceobj.createOffsetImage()<line_sep>imageAz.setFilename(offsetAz)<line_sep>imageAz.setWidth(widthOffset)<line_sep>imageRn=isceobj.createOffsetImage()<line_sep>imageRn.setFilename(offsetRn)<line_sep>imageRn.setWidth(widthOffset)<line_sep>objAz=isceobj.createOffsetImage()<line_sep>objRn=isceobj.createOffsetImage()<line_sep>IU.copyAttributes(imageAz objAz)<line_sep>IU.copyAttributes(imageRn objRn)<line_sep>objAz.setAccessMode('write')<line_sep>objAz.createImage()<line_sep>objRn.setAccessMode('write')<line_sep>objRn.createImage()<line_sep>objResamp_image=stdproc.createResamp_image()<line_sep>objResamp_image.wireInputPort(name='offsets' object=offsetField)<line_sep>objResamp_image.wireInputPort(name='instrument' object=instrument)<line_sep>objResamp_image.setSlantRangePixelSpacing(pixelSpacing)<line_sep>objResamp_image.setDopplerCentroidCoefficients(dopplerCoeff)<line_sep>objResamp_image.setNumberLooks(looks)<line_sep>objResamp_image.setNumberLines(lines)<line_sep>objResamp_image.setNumberRangeBin(widthSlc)<line_sep>objResamp_image.setNumberFitCoefficients(numFitCoeff)<line_sep>#set the tag used in the outfile. each message is preceded by this tag #if the writer is not of "file" type the call has no effect objResamp_image.stdWriter=stdWriter.set_file_tags("resamp_image" "log" "err" "out")<line_sep>objResamp_image.resamp_image(objRn objAz)<if_stmt>catalog<is><not><none># Record the inputs and outputs <block_start>isceobj.Catalog.recordInputsAndOutputs(catalog objResamp_image "runResamp_image.%s"%sceneid logger "runResamp_image.%s"%sceneid)<block_end>objRn.finalizeImage()<line_sep>objAz.finalizeImage()<line_sep><return>imageAz imageRn<block_end>
# Copyright (c) 2021, <NAME> # License: MIT License <import_stmt>pytest<line_sep>pytest.importorskip("PySide6")<import_from_stmt>io StringIO<import_stmt>math<import_from_stmt>ezdxf.lldxf.tags Tags DXFTag<import_from_stmt>ezdxf.lldxf.loader load_dxf_structure<import_from_stmt>ezdxf.lldxf.tagger ascii_tags_loader<import_from_stmt>ezdxf.addons.browser DXFTagsModel DXFStructureModel DXFDocument<import_from_stmt>ezdxf.addons.browser.tags compile_tags<import_from_stmt>ezdxf.addons.browser.data EntityIndex EntityHistory SearchIndex <import_from_stmt>ezdxf.addons.xqt Qt QModelIndex<def_stmt>txt2tags s:str<arrow>Tags<block_start><return>Tags(ascii_tags_loader(StringIO(s) skip_comments=<false>))<block_end>NAN=float("nan")<line_sep># noinspection PyMissingConstructor <class_stmt>ModelIndex(QModelIndex)<block_start>"""Proxy"""<def_stmt>__init__ self row col<block_start>self._row=row<line_sep>self._col=col<block_end><def_stmt>row self<block_start><return>self._row<block_end><def_stmt>column self<block_start><return>self._col<block_end><block_end><class_stmt>TestDXFTagsModel<block_start><def_stmt>tags self<block_start><return>txt2tags(POINT)<block_end>@pytest.fixture<def_stmt>model self<block_start><return>DXFTagsModel(self.tags())<block_end><def_stmt>test_fixed_column_count self model<block_start><assert_stmt>model.columnCount()<eq>3<block_end><def_stmt>test_row_count self model<block_start><assert_stmt>model.rowCount()<eq>len(compile_tags(self.tags()))<block_end><def_stmt>test_render_display_role self model<block_start><assert_stmt>model.data(ModelIndex(0 0) role=Qt.DisplayRole)<eq>"0"<assert_stmt>model.data(ModelIndex(0 1) role=Qt.DisplayRole)<eq>"<ctrl>"<assert_stmt>model.data(ModelIndex(0 2) role=Qt.DisplayRole)<eq>"POINT"<block_end><block_end>POINT="""0 POINT 5 0 330 0 100 AcDbEntity 8 0 100 AcDbPoint 10 0.0 20 0.0 30 0.0 """<def_stmt>test_setup_dxf_structure_model <block_start>sections=load_dxf_structure(txt2tags(ENTITIES))<line_sep>doc=DXFDocument(sections)<line_sep>model=DXFStructureModel("ez.dxf" doc)<line_sep>parent=model.item(0 0)<assert_stmt>parent.data(Qt.DisplayRole)<eq>"ez.dxf"<assert_stmt>"ENTITIES"<in>parent.child(0 0).data(Qt.DisplayRole)<line_sep># one level down parent=parent.child(0 0)<assert_stmt>"LINE"<in>parent.child(0 0).data(Qt.DisplayRole)<assert_stmt>"LINE"<in>parent.child(1 0).data(Qt.DisplayRole)<block_end><class_stmt>TestDXFDocument<block_start>@pytest.fixture<def_stmt>doc self<block_start>sections=load_dxf_structure(txt2tags(ENTITIES))<line_sep><return>DXFDocument(sections)<block_end><def_stmt>test_get_entity_returns_entity_tags self doc<block_start>entity=doc.get_entity("100")<assert_stmt>entity[0]<eq>(0 "LINE")<block_end><def_stmt>test_get_entity_by_invalid_handle_returns_none self doc<block_start><assert_stmt>doc.get_entity("XXX")<is><none><block_end><def_stmt>test_get_start_line_number_for_entity self doc<block_start>entity=doc.get_entity("101")<assert_stmt>doc.get_line_number(entity)<eq>9<block_end><def_stmt>test_get_entity_by_line_number self doc<block_start>entity=doc.get_entity("101")<assert_stmt>doc.get_entity_at_line(9)<is>entity<assert_stmt>doc.get_entity_at_line(10)<is>entity<assert_stmt>(doc.get_entity_at_line(99)<is>entity) "should return the last entity"<block_end><block_end><class_stmt>TestTagCompiler<block_start><def_stmt>test_compile_single_int self<block_start>tags=compile_tags(txt2tags("70\n3"))<assert_stmt>tags[0]<eq>(70 3)<block_end><def_stmt>test_compile_invalid_int_to_str 
self<block_start>tags=compile_tags(txt2tags("70\nx"))<assert_stmt>tags[0]<eq>(70 "x")<block_end><def_stmt>test_compile_single_float self<block_start>tags=compile_tags(txt2tags("40\n3.14"))<assert_stmt>tags[0]<eq>(40 3.14)<block_end><def_stmt>test_compile_invalid_float_to_str self<block_start>tags=compile_tags(txt2tags("40\nx.14"))<assert_stmt>tags[0]<eq>(40 "x.14")<block_end><def_stmt>test_compile_single_2d_point self<block_start>tags=compile_tags(txt2tags("10\n1.2\n20\n2.3"))<assert_stmt>tags[0]<eq>(10 (1.2 2.3))<block_end><def_stmt>test_compile_two_2d_points self<block_start>tags=compile_tags(txt2tags("10\n1.1\n20\n1.2\n10\n2.1\n20\n2.2"))<assert_stmt>tags[0]<eq>(10 (1.1 1.2))<assert_stmt>tags[1]<eq>(10 (2.1 2.2))<block_end><def_stmt>test_compile_nan_coords_2d self<block_start>tags=compile_tags(txt2tags("10\nx.2\n20\n2.3"))<assert_stmt>math.isnan(tags[0].value[0])<block_end><def_stmt>test_compile_single_3d_point self<block_start>tags=compile_tags(txt2tags("10\n1.2\n20\n2.3\n30\n3.4"))<assert_stmt>tags[0]<eq>(10 (1.2 2.3 3.4))<block_end><def_stmt>test_compile_nan_coords_3d self<block_start>tags=compile_tags(txt2tags("10\n1\n20\n2\n30\nx"))<assert_stmt>math.isnan(tags[0].value[2])<block_end><def_stmt>test_compile_single_group_code_10 self<block_start>tags=compile_tags(txt2tags("10\n1.1"))<assert_stmt>tags[0]<eq>(10 1.1)<block_end><def_stmt>test_compile_two_group_code_10 self<block_start>tags=compile_tags(txt2tags("10\n1.1\n10\n2.2"))<assert_stmt>tags[0]<eq>(10 1.1)<assert_stmt>tags[1]<eq>(10 2.2)<block_end><def_stmt>test_compile_swapped_coords self<block_start>tags=compile_tags(txt2tags("20\n2.2\n10\n1.1"))<assert_stmt>tags[0]<eq>(20 2.2) "expected coords as single tags"<assert_stmt>tags[1]<eq>(10 1.1) "expected coords as single tags"<block_end><block_end>ENTITIES="""0 SECTION 2 ENTITIES 0 LINE 5 100 0 LINE 5 101 0 ENDSEC 0 EOF """<line_sep>SECTIONS="""0 SECTION 2 HEADER 9 $ACADVER 1 AC1032 0 ENDSEC 0 SECTION 2 ENTITIES 0 LINE 5 100 0 ENDSEC 0 EOF """<class_stmt>TestEntityIndex<block_start>@pytest.fixture(scope="class")<def_stmt>index self<block_start>data={"ENTITIES":[Tags([DXFTag(0 "ENTITY1") DXFTag(5 "F001")]) Tags([DXFTag(0 "ENTITY2") DXFTag(5 "F002")]) Tags([DXFTag(0 "ENTITY3") DXFTag(5 "F003")]) Tags([DXFTag(0 "ENTITY4") DXFTag(5 "F004")]) # last entity without handle, has dummy handle "*1" Tags([DXFTag(0 "ENTITY5") DXFTag(1 "DATA")]) ]}<line_sep><return>EntityIndex(data)<block_end><def_stmt>test_contains_all_entities self index<block_start><assert_stmt>"F001"<in>index<assert_stmt>"F002"<in>index<assert_stmt>"F003"<in>index<assert_stmt>"F004"<in>index<assert_stmt>"*1"<in>index "expected dummy handle"<block_end><def_stmt>test_get_entity_by_handle self index<block_start>tags=index.get("F001")<assert_stmt>tags[0]<eq>(0 "ENTITY1")<block_end><def_stmt>test_get_entity_by_dummy_handle self index<block_start>tags=index.get("*1")<assert_stmt>tags[0]<eq>(0 "ENTITY5")<block_end><def_stmt>test_get_handle_from_casted_tags self index<block_start>entity=Tags(index.get("F001"))<assert_stmt>index.get_handle(entity)<eq>"F001"<block_end><def_stmt>test_get_dummy_handle_from_casted_tags self index<block_start>entity=Tags(index.get("*1"))<assert_stmt>index.get_handle(entity)<eq>"*1"<block_end><def_stmt>test_get_next_entity self index<block_start>e1=index.get("F001")<line_sep>e2=index.get("F002")<assert_stmt>index.next_entity(e1)<is>e2<block_end><def_stmt>test_next_entity_of_last_entity_is_last_entity self 
index<block_start>e1=index.get("*1")<assert_stmt>index.next_entity(e1)<is>e1<block_end><def_stmt>test_get_prev_entity self index<block_start>e1=index.get("F001")<line_sep>e2=index.get("F002")<assert_stmt>index.previous_entity(e2)<is>e1<block_end><def_stmt>test_prev_entity_of_first_entity_is_first_entity self index<block_start>e1=index.get("F001")<assert_stmt>index.previous_entity(e1)<is>e1<block_end><def_stmt>test_max_line_number self index<block_start><assert_stmt>index.max_line_number<eq>20<block_end><def_stmt>test_get_start_line_number self index<block_start>e=index.get("F003")<assert_stmt>index.get_start_line_for_entity(e)<eq>9<block_end><def_stmt>test_get_start_line_number_for_dummy_handle self index<block_start>e=index.get("*1")<assert_stmt>index.get_start_line_for_entity(e)<eq>17<block_end><def_stmt>test_entity_at_line self index<block_start>e3=index.get("F003")<assert_stmt>index.get_entity_at_line(9)<is>e3<assert_stmt>index.get_entity_at_line(10)<is>e3<block_end><def_stmt>test_entity_at_line_for_dummy_handle self index<block_start>e=index.get("*1")<assert_stmt>index.get_entity_at_line(19)<is>e<assert_stmt>index.get_entity_at_line(20)<is>e<block_end><block_end><def_stmt>test_entity_index_adds_missing_endsec_tag # The function load_dxf_structure() throws the ENDSEC tag away. # The entity indexer must take this issue into account! <block_start>sections=load_dxf_structure(txt2tags(SECTIONS))<line_sep>index=EntityIndex(sections)<line_sep>entity=index.get_entity_at_line(15)<assert_stmt>entity.get_handle()<eq>"100"<assert_stmt>index.get_start_line_for_entity(entity)<eq>15<block_end><class_stmt>TestEntityHistory<block_start>@pytest.fixture<def_stmt>history2 self<block_start>history=EntityHistory()<line_sep>history.append(Tags([DXFTag(1 "first")]))<line_sep>history.append(Tags([DXFTag(2 "second")]))<line_sep><return>history<block_end><def_stmt>test_setup_history self<block_start>history=EntityHistory()<assert_stmt>len(history)<eq>0<assert_stmt>history.index<eq>0<block_end><def_stmt>test_empty_history_returns_none self<block_start>history=EntityHistory()<assert_stmt>history.back()<is><none><assert_stmt>history.forward()<is><none><block_end><def_stmt>test_append_one_entity self<block_start>history=EntityHistory()<line_sep>history.append(Tags())<assert_stmt>len(history)<eq>1<assert_stmt>history.index<eq>0<block_end><def_stmt>test_append_two_entities self<block_start>history=EntityHistory()<line_sep>history.append(Tags())<line_sep>history.append(Tags())<assert_stmt>len(history)<eq>2<assert_stmt>history.index<eq>1<block_end><def_stmt>test_go_back_in_history self history2<block_start>first,second=history2.content()<assert_stmt>history2.index<eq>1<assert_stmt>history2.back()<is>first<assert_stmt>len(history2)<eq>2 "entity is still in history"<assert_stmt>history2.index<eq>0<block_end><def_stmt>test_go_back_and_forward_in_history self history2<block_start>first,second=history2.content()<assert_stmt>history2.back()<is>first<assert_stmt>history2.forward()<is>second<block_end><def_stmt>test_append_should_add_time_travel_history self history2<block_start>first,second=history2.content()<assert_stmt>history2.back()<is>first# 1st time travel <assert_stmt>history2.index<eq>0<assert_stmt>history2.forward()<is>second# 2nd time travel <assert_stmt>history2.index<eq>1<line_sep>third=Tags([DXFTag(3 "third")])<line_sep>history2.append(third)<assert_stmt>history2.index<eq>4<line_sep># complete travel history content=history2.content()<assert_stmt>len(content)<eq>5<line_sep># time wraps -> append 
<assert_stmt>content<eq>[first second first second third]<block_end><block_end>SEARCH_EXAMPLE1="""0 SEARCH1 8 LayerName1 62 7 """<line_sep>SEARCH_EXAMPLE2="""0 SEARCH2 8 LayerName2 62 6 """<class_stmt>TestSearchIndex<block_start>@pytest.fixture(scope="class")<def_stmt>entities self<block_start><return>[txt2tags(SEARCH_EXAMPLE1) txt2tags(SEARCH_EXAMPLE2)]<block_end>@pytest.fixture<def_stmt>search self entities<block_start><return>SearchIndex(entities)<block_end>@staticmethod<def_stmt>move_cursor_forward s:SearchIndex count:int<block_start><for_stmt>_ range(count)<block_start>s.move_cursor_forward()<block_end><block_end>@staticmethod<def_stmt>move_cursor_backward s:SearchIndex count:int<block_start><for_stmt>_ range(count)<block_start>s.move_cursor_backward()<block_end><block_end><def_stmt>test_valid_setup_and_default_settings self search<block_start><assert_stmt>len(search.entities)<eq>2<assert_stmt>search.is_end_of_index<is><false><assert_stmt>(search.case_insensitive<is><true>) "should be case insensitive by default"<assert_stmt>(search.numbers<is><false>) "should not search in number tags by default"<block_end><def_stmt>test_empty_search_index self<block_start>search_index=SearchIndex([])<assert_stmt>search_index.is_end_of_index<is><true><block_end><def_stmt>test_reset_cursor_forward self search<block_start>search.reset_cursor(backward=<false>)<assert_stmt>search.cursor()<eq>(0 0 ) "cursor should be the first tag of the first entity"<assert_stmt>search.is_end_of_index<is><false><block_end><def_stmt>test_move_cursor_forward self search<block_start>search.reset_cursor()<line_sep>search.move_cursor_forward()<assert_stmt>search.cursor()<eq>(0 1)<block_end><def_stmt>test_move_cursor_forward_beyond_entity_border self search<block_start>search.reset_cursor()<line_sep>self.move_cursor_forward(search 3)<assert_stmt>search.cursor()<eq>(1 0)<block_end><def_stmt>test_move_cursor_forward_to_the_end_of_index self search<block_start>search.reset_cursor()<line_sep>self.move_cursor_forward(search 10)<assert_stmt>search.is_end_of_index<is><true><assert_stmt>search.cursor()<eq>(1 2 ) "index should stop at the last tag of the last entity"<block_end><def_stmt>test_reset_cursor_backward self search<block_start>search.reset_cursor(backward=<true>)<assert_stmt>search.cursor()<eq>(1 2 ) "cursor should be the last tag of the last entity"<assert_stmt>search.is_end_of_index<is><false><block_end><def_stmt>test_move_cursor_backward self search<block_start>search.reset_cursor(backward=<true>)<line_sep>search.move_cursor_backward()<assert_stmt>search.cursor()<eq>(1 1)<block_end><def_stmt>test_move_cursor_backward_beyond_entity_border self search<block_start>search.reset_cursor(backward=<true>)<line_sep>self.move_cursor_backward(search 3)<assert_stmt>search.cursor()<eq>(0 2)<block_end><def_stmt>test_move_cursor_backward_to_the_end_of_index self search<block_start>search.reset_cursor()<line_sep>self.move_cursor_backward(search 10)<assert_stmt>search.is_end_of_index<is><true><assert_stmt>search.cursor()<eq>(0 0 ) "index should stop at the first tag of the first entity"<block_end><def_stmt>test_failing_search self search<block_start>entity,index=search.find("XDATA")<assert_stmt>entity<is><none><assert_stmt>index<eq>-1<assert_stmt>search.is_end_of_index<is><true><block_end><def_stmt>test_find_entity_type self search<block_start>entity,index=search.find("SEARCH1")<assert_stmt>entity<is>search.entities[0]<assert_stmt>index<eq>0<block_end><def_stmt>test_find_forward_entity_type self 
search<block_start>search.find("SEARCH")<line_sep>entity,index=search.find_forward()<assert_stmt>entity<is>search.entities[1]<assert_stmt>index<eq>0<block_end><def_stmt>test_find_content self search<block_start>entity,index=search.find("LayerName1")<assert_stmt>entity<is>search.entities[0]<assert_stmt>index<eq>1<block_end><def_stmt>test_find_forward_content self search<block_start>search.find("LayerName")<line_sep>entity,index=search.find_forward()<assert_stmt>entity<is>search.entities[1]<assert_stmt>index<eq>1<block_end><def_stmt>test_failing_find_forward_returns_none self search<block_start>search.find("LayerName")<line_sep>search.find_forward()<line_sep>entity,index=search.find_forward()<assert_stmt>entity<is><none><assert_stmt>index<eq>-1<block_end><def_stmt>test_not_initiated_find_forward_returns_none self search<block_start>entity,index=search.find_forward()<assert_stmt>entity<is><none><assert_stmt>index<eq>-1<block_end><def_stmt>test_case_insensitive_search self search<block_start>search.case_insensitive=<true><line_sep>entity,index=search.find("LAYERNAME1")<assert_stmt>entity<is>search.entities[0]<assert_stmt>index<eq>1<block_end><def_stmt>test_case_sensitive_search self search<block_start>search.case_insensitive=<false><line_sep>entity,index=search.find("LAYERNAME1")<assert_stmt>entity<is><none><block_end><def_stmt>test_ignore_number_tags self search<block_start>search.numbers=<false><line_sep>entity,index=search.find("6")<assert_stmt>entity<is><none><block_end><def_stmt>test_search_in_number_tags self search<block_start>search.numbers=<true><line_sep>entity,index=search.find("6")<assert_stmt>entity<is>search.entities[1]<assert_stmt>index<eq>2<block_end><def_stmt>test_failing_find_forward_stops_at_the_end self search<block_start><assert_stmt>search.find("XXX")<is>search.NOT_FOUND<assert_stmt>search.is_end_of_index<is><true><block_end><def_stmt>test_failing_find_backwards_stops_at_the_beginning self search<block_start><assert_stmt>search.find("XXX" backward=<true>)<is>search.NOT_FOUND<assert_stmt>search.is_end_of_index<is><true><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>pytest.main([__file__])<block_end>
<import_stmt>os<import_stmt>platform<import_stmt>popen2<import_from_stmt>flask Flask request<line_sep>app=Flask(__name__)<line_sep>@app.route("/python2-specific")<def_stmt>python2_specific <block_start>""" These tests are mostly included to check for extra paths that can be generated if we can track flow into the implementation of a stdlib function, and then to another sink. See comment in query for more details. """<line_sep>files=request.args.get("files" "")<line_sep>os.popen2("ls "+files)<line_sep>os.popen3("ls "+files)<line_sep>os.popen4("ls "+files)<line_sep>platform.popen("ls "+files)<line_sep>popen2.popen2("ls "+files)<line_sep>popen2.popen3("ls "+files)<line_sep>popen2.popen4("ls "+files)<line_sep>popen2.Popen3("ls "+files)<line_sep>popen2.Popen4("ls "+files)<block_end>
<import_stmt>dash_bootstrap_components<as>dbc<line_sep>progress=dbc.Progress(value=50)<line_sep>
<import_stmt>os<line_sep>counts={"0":0 "1":0}<for_stmt>file os.listdir("./Dataset")<block_start><if_stmt>file.endswith(".txt")<and>file<ne>'classes.txt'<block_start>f=open("./Dataset/"+file)<line_sep>lines=f.readlines()<for_stmt>line lines<block_start>counts[''+line[0]]<augadd>1<block_end><block_end><block_end>print(counts)<line_sep>
<import_from_stmt>softdelete.admin.admin *<import_from_stmt>softdelete.admin.forms *<line_sep>__all__=['SoftDeleteObjectAdmin' 'SoftDeleteRecordAdmin' 'ChangeSetAdmin' 'SoftDeleteObjectInline' 'SoftDeleteObjectAdminForm' ]<line_sep>
<import_stmt>os<import_stmt>os.path<as>osp<import_stmt>numpy<as>np<import_from_stmt>config cfg<import_stmt>copy<import_stmt>json<import_stmt>scipy.io<as>sio<import_stmt>cv2<import_stmt>random<import_stmt>math<import_stmt>torch<import_stmt>transforms3d<import_from_stmt>pycocotools.coco COCO<import_from_stmt>utils.smpl SMPL<import_from_stmt>utils.preprocessing load_img process_bbox augmentation<import_from_stmt>utils.vis vis_keypoints vis_mesh save_obj<import_from_stmt>utils.transforms world2cam cam2pixel pixel2cam transform_joint_to_other_db<class_stmt>MSCOCO(torch.utils.data.Dataset)<block_start><def_stmt>__init__ self transform data_split<block_start>self.transform=transform<line_sep>self.data_split='train'<if>data_split<eq>'train'<else>'val'<line_sep>self.img_path=osp.join('..' 'data' 'MSCOCO' 'images')<line_sep>self.annot_path=osp.join('..' 'data' 'MSCOCO' 'annotations')<line_sep>self.rootnet_output_path=osp.join('..' 'data' 'MSCOCO' 'rootnet_output' 'bbox_root_coco_output.json')<line_sep>self.fitting_thr=3.0# pixel in cfg.output_hm_shape space # mscoco skeleton self.coco_joint_num=18# original: 17, manually added pelvis self.coco_joints_name=('Nose' 'L_Eye' 'R_Eye' 'L_Ear' 'R_Ear' 'L_Shoulder' 'R_Shoulder' 'L_Elbow' 'R_Elbow' 'L_Wrist' 'R_Wrist' 'L_Hip' 'R_Hip' 'L_Knee' 'R_Knee' 'L_Ankle' 'R_Ankle' 'Pelvis')<line_sep>self.coco_skeleton=((1 2) (0 1) (0 2) (2 4) (1 3) (6 8) (8 10) (5 7) (7 9) (12 14) (14 16) (11 13) (13 15) (5 6) (11 12))<line_sep>self.coco_flip_pairs=((1 2) (3 4) (5 6) (7 8) (9 10) (11 12) (13 14) (15 16))<line_sep>self.coco_joint_regressor=np.load(osp.join('..' 'data' 'MSCOCO' 'J_regressor_coco_hip_smpl.npy'))<line_sep># smpl skeleton self.smpl=SMPL()<line_sep>self.face=self.smpl.face<line_sep>self.joint_regressor=self.smpl.joint_regressor<line_sep>self.vertex_num=self.smpl.vertex_num<line_sep>self.joint_num=self.smpl.joint_num<line_sep>self.joints_name=self.smpl.joints_name<line_sep>self.flip_pairs=self.smpl.flip_pairs<line_sep>self.skeleton=self.smpl.skeleton<line_sep>self.root_joint_idx=self.smpl.root_joint_idx<line_sep>self.face_kps_vertex=self.smpl.face_kps_vertex<line_sep>self.datalist=self.load_data()<block_end><def_stmt>add_pelvis self joint_coord<block_start>lhip_idx=self.coco_joints_name.index('L_Hip')<line_sep>rhip_idx=self.coco_joints_name.index('R_Hip')<line_sep>pelvis=(joint_coord[lhip_idx :]+joint_coord[rhip_idx :])<times>0.5<line_sep>pelvis[2]=joint_coord[lhip_idx 2]<times>joint_coord[rhip_idx 2]# joint_valid pelvis=pelvis.reshape(1 3)<line_sep>joint_coord=np.concatenate((joint_coord pelvis))<line_sep><return>joint_coord<block_end><def_stmt>load_data self<block_start>db=COCO(osp.join(self.annot_path 'person_keypoints_'+self.data_split+'2017.json'))<with_stmt>open(osp.join(self.annot_path 'coco_smplifyx_train.json'))<as>f<block_start>smplify_results=json.load(f)<block_end>datalist=[]<if_stmt>self.data_split<eq>'train'<block_start><for_stmt>aid db.anns.keys()<block_start>ann=db.anns[aid]<line_sep>img=db.loadImgs(ann['image_id'])[0]<line_sep>imgname=osp.join('train2017' img['file_name'])<line_sep>img_path=osp.join(self.img_path imgname)<line_sep>width,height=img['width'] img['height']<if_stmt>ann['iscrowd']<or>(ann['num_keypoints']<eq>0)<block_start><continue><block_end># bbox bbox=process_bbox(ann['bbox'] width height)<if_stmt>bbox<is><none><block_start><continue><block_end># joint coordinates joint_img=np.array(ann['keypoints'] dtype=np.float32).reshape(-1 3)<line_sep>joint_img=self.add_pelvis(joint_img)<line_sep>joint_valid=(joint_img[: 
2].copy().reshape(-1 1)<g>0).astype(np.float32)<line_sep>joint_img[: 2]=0<if_stmt>str(aid)<in>smplify_results<block_start>smplify_result=smplify_results[str(aid)]<block_end><else_stmt><block_start>smplify_result=<none><block_end>datalist.append({'img_path':img_path 'img_shape':(height width) 'bbox':bbox 'joint_img':joint_img 'joint_valid':joint_valid 'smplify_result':smplify_result})<block_end><block_end><else_stmt><block_start><with_stmt>open(self.rootnet_output_path)<as>f<block_start>rootnet_output=json.load(f)<block_end>print('Load RootNet output from '+self.rootnet_output_path)<for_stmt>i range(len(rootnet_output))<block_start>image_id=rootnet_output[i]['image_id']<if_stmt>image_id<not><in>db.imgs<block_start><continue><block_end>img=db.loadImgs(image_id)[0]<line_sep>imgname=osp.join('val2017' img['file_name'])<line_sep>img_path=osp.join(self.img_path imgname)<line_sep>height,width=img['height'] img['width']<line_sep>fx,fy,cx,cy=1500 1500 img['width']/2 img['height']/2<line_sep>focal=np.array([fx fy] dtype=np.float32)<line_sep>princpt=np.array([cx cy] dtype=np.float32)<line_sep>root_joint_depth=np.array(rootnet_output[i]['root_cam'][2])<line_sep>bbox=np.array(rootnet_output[i]['bbox']).reshape(4)<line_sep>cam_param={'focal':focal 'princpt':princpt}<line_sep>datalist.append({'img_path':img_path 'img_shape':(height width) 'bbox':bbox 'root_joint_depth':root_joint_depth 'cam_param':cam_param})<block_end><block_end><return>datalist<block_end><def_stmt>get_smpl_coord self smpl_param cam_param do_flip img_shape<block_start>pose,shape,trans=smpl_param['pose'] smpl_param['shape'] smpl_param['trans']<line_sep>smpl_pose=torch.FloatTensor(pose).view(1 -1)<line_sep>smpl_shape=torch.FloatTensor(shape).view(1 -1)<line_sep># smpl parameters (pose: 72 dimension, shape: 10 dimension) smpl_trans=torch.FloatTensor(trans).view(1 -1)# translation vector # flip smpl pose parameter (axis-angle) <if_stmt>do_flip<block_start>smpl_pose=smpl_pose.view(-1 3)<for_stmt>pair self.flip_pairs<block_start><if_stmt>pair[0]<l>len(smpl_pose)<and>pair[1]<l>len(smpl_pose)# face keypoints are already included in self.flip_pairs. However, they are not included in smpl_pose. <block_start>smpl_pose[pair[0] :],smpl_pose[pair[1] :]=smpl_pose[pair[1] :].clone() smpl_pose[pair[0] :].clone()<block_end><block_end>smpl_pose[: 1:3]<augmul>-1<line_sep># multiply -1 to y and z axis of axis-angle smpl_pose=smpl_pose.view(1 -1)<block_end># get mesh and joint coordinates smpl_mesh_coord,smpl_joint_coord=self.smpl.layer['neutral'](smpl_pose smpl_shape smpl_trans)<line_sep># incorporate face keypoints smpl_mesh_coord=smpl_mesh_coord.numpy().astype(np.float32).reshape(-1 3)<line_sep>smpl_joint_coord=smpl_joint_coord.numpy().astype(np.float32).reshape(-1 3)<line_sep>smpl_face_kps_coord=smpl_mesh_coord[self.face_kps_vertex :].reshape(-1 3)<line_sep>smpl_joint_coord=np.concatenate((smpl_joint_coord smpl_face_kps_coord))<line_sep># flip translation <if_stmt>do_flip# avg of old and new root joint should be image center. 
<block_start>focal,princpt=cam_param['focal'] cam_param['princpt']<line_sep>flip_trans_x=2<times>(((img_shape[1]-1)/2.-princpt[0])/focal[0]<times>(smpl_joint_coord[self.root_joint_idx 2]))-2<times>smpl_joint_coord[self.root_joint_idx][0]<line_sep>smpl_mesh_coord[: 0]<augadd>flip_trans_x<line_sep>smpl_joint_coord[: 0]<augadd>flip_trans_x<block_end># change to mean shape if beta is too far from it smpl_shape[(smpl_shape.abs()<g>3).any(dim=1)]=0.<line_sep><return>smpl_mesh_coord smpl_joint_coord smpl_pose[0].numpy() smpl_shape[0].numpy()<block_end><def_stmt>get_fitting_error self coco_joint smpl_mesh cam_param img2bb_trans coco_joint_valid# get coco joint from smpl mesh <block_start>coco_from_smpl=np.dot(self.coco_joint_regressor smpl_mesh)<line_sep>coco_from_smpl=self.add_pelvis(coco_from_smpl)# z-axis component will be removed coco_from_smpl=cam2pixel(coco_from_smpl cam_param['focal'] cam_param['princpt'])<line_sep>coco_from_smpl_xy1=np.concatenate((coco_from_smpl[: :2] np.ones_like(coco_from_smpl[: 0:1])) 1)<line_sep>coco_from_smpl[: :2]=np.dot(img2bb_trans coco_from_smpl_xy1.transpose(1 0)).transpose(1 0)<line_sep>coco_from_smpl[: 0]=coco_from_smpl[: 0]/cfg.input_img_shape[1]<times>cfg.output_hm_shape[2]<line_sep>coco_from_smpl[: 1]=coco_from_smpl[: 1]/cfg.input_img_shape[0]<times>cfg.output_hm_shape[1]<line_sep># mask joint coordinates coco_joint=coco_joint[: :2][np.tile(coco_joint_valid (1 2))<eq>1].reshape(-1 2)<line_sep>coco_from_smpl=coco_from_smpl[: :2][np.tile(coco_joint_valid (1 2))<eq>1].reshape(-1 2)<line_sep>error=np.sqrt(np.sum((coco_joint-coco_from_smpl)<power>2 1)).mean()<line_sep><return>error<block_end><def_stmt>__len__ self<block_start><return>len(self.datalist)<block_end><def_stmt>__getitem__ self idx<block_start>data=copy.deepcopy(self.datalist[idx])<line_sep>img_path,img_shape,bbox=data['img_path'] data['img_shape'] data['bbox']<line_sep># image load and affine transform img=load_img(img_path)<line_sep>img,img2bb_trans,bb2img_trans,rot,do_flip=augmentation(img bbox self.data_split)<line_sep>img=self.transform(img.astype(np.float32))/255.<if_stmt>self.data_split<eq>'train'# coco gt <block_start>coco_joint_img=data['joint_img']<line_sep>coco_joint_valid=data['joint_valid']<if_stmt>do_flip<block_start>coco_joint_img[: 0]=img_shape[1]-1-coco_joint_img[: 0]<for_stmt>pair self.coco_flip_pairs<block_start>coco_joint_img[pair[0] :],coco_joint_img[pair[1] :]=coco_joint_img[pair[1] :].copy() coco_joint_img[pair[0] :].copy()<line_sep>coco_joint_valid[pair[0] :],coco_joint_valid[pair[1] :]=coco_joint_valid[pair[1] :].copy() coco_joint_valid[pair[0] :].copy()<block_end><block_end>coco_joint_img_xy1=np.concatenate((coco_joint_img[: :2] np.ones_like(coco_joint_img[: :1])) 1)<line_sep>coco_joint_img[: :2]=np.dot(img2bb_trans coco_joint_img_xy1.transpose(1 0)).transpose(1 0)<line_sep>coco_joint_img[: 0]=coco_joint_img[: 0]/cfg.input_img_shape[1]<times>cfg.output_hm_shape[2]<line_sep>coco_joint_img[: 1]=coco_joint_img[: 1]/cfg.input_img_shape[0]<times>cfg.output_hm_shape[1]<line_sep># backup for calculating fitting error _coco_joint_img=coco_joint_img.copy()<line_sep>_coco_joint_valid=coco_joint_valid.copy()<line_sep># check truncation coco_joint_trunc=coco_joint_valid<times>((coco_joint_img[: 0]<ge>0)<times>(coco_joint_img[: 0]<l>cfg.output_hm_shape[2])<times>(coco_joint_img[: 1]<ge>0)<times>(coco_joint_img[: 1]<l>cfg.output_hm_shape[1])).reshape(-1 1).astype(np.float32)<line_sep># transform coco joints to target db joints coco_joint_img=transform_joint_to_other_db(coco_joint_img 
self.coco_joints_name self.joints_name)<line_sep>coco_joint_cam=np.zeros((self.joint_num 3) dtype=np.float32)# dummy coco_joint_valid=transform_joint_to_other_db(coco_joint_valid self.coco_joints_name self.joints_name)<line_sep>coco_joint_trunc=transform_joint_to_other_db(coco_joint_trunc self.coco_joints_name self.joints_name)<line_sep>smplify_result=data['smplify_result']<if_stmt>smplify_result<is><not><none># use fitted mesh <block_start>smpl_param,cam_param=smplify_result['smpl_param'] smplify_result['cam_param']<line_sep>smpl_mesh_cam,smpl_joint_cam,smpl_pose,smpl_shape=self.get_smpl_coord(smpl_param cam_param do_flip img_shape)<line_sep>smpl_coord_cam=np.concatenate((smpl_mesh_cam smpl_joint_cam))<line_sep>smpl_coord_img=cam2pixel(smpl_coord_cam cam_param['focal'] cam_param['princpt'])<line_sep># x,y affine transform, root-relative depth smpl_coord_img_xy1=np.concatenate((smpl_coord_img[: :2] np.ones_like(smpl_coord_img[: 0:1])) 1)<line_sep>smpl_coord_img[: :2]=np.dot(img2bb_trans smpl_coord_img_xy1.transpose(1 0)).transpose(1 0)[: :2]<line_sep>smpl_coord_img[: 2]=smpl_coord_img[: 2]-smpl_coord_cam[self.vertex_num+self.root_joint_idx][2]<line_sep>smpl_coord_img[: 0]=smpl_coord_img[: 0]/cfg.input_img_shape[1]<times>cfg.output_hm_shape[2]<line_sep>smpl_coord_img[: 1]=smpl_coord_img[: 1]/cfg.input_img_shape[0]<times>cfg.output_hm_shape[1]<line_sep>smpl_coord_img[: 2]=(smpl_coord_img[: 2]/(cfg.bbox_3d_size/2)+1)/2.<times>cfg.output_hm_shape[0]<line_sep># check truncation smpl_trunc=((smpl_coord_img[: 0]<ge>0)<times>(smpl_coord_img[: 0]<l>cfg.output_hm_shape[2])<times>(smpl_coord_img[: 1]<ge>0)<times>(smpl_coord_img[: 1]<l>cfg.output_hm_shape[1])<times>(smpl_coord_img[: 2]<ge>0)<times>(smpl_coord_img[: 2]<l>cfg.output_hm_shape[0])).reshape(-1 1).astype(np.float32)<line_sep># split mesh and joint coordinates smpl_mesh_img=smpl_coord_img[:self.vertex_num]<line_sep>smpl_joint_img=smpl_coord_img[self.vertex_num:]<line_sep>smpl_mesh_trunc=smpl_trunc[:self.vertex_num]<line_sep>smpl_joint_trunc=smpl_trunc[self.vertex_num:]<line_sep># if fitted mesh is too far from h36m gt, discard it is_valid_fit=<true><line_sep>error=self.get_fitting_error(_coco_joint_img smpl_mesh_cam cam_param img2bb_trans _coco_joint_valid)<if_stmt>error<g>self.fitting_thr<block_start>is_valid_fit=<false><block_end><block_end><else_stmt><block_start>smpl_joint_img=np.zeros((self.joint_num 3) dtype=np.float32)# dummy smpl_joint_cam=np.zeros((self.joint_num 3) dtype=np.float32)# dummy smpl_mesh_img=np.zeros((self.vertex_num 3) dtype=np.float32)# dummy smpl_pose=np.zeros((72) dtype=np.float32)# dummy smpl_shape=np.zeros((10) dtype=np.float32)# dummy smpl_joint_trunc=np.zeros((self.joint_num 1) dtype=np.float32)<line_sep>smpl_mesh_trunc=np.zeros((self.vertex_num 1) dtype=np.float32)<line_sep>is_valid_fit=<false><block_end># 3D data rotation augmentation rot_aug_mat=np.array([[np.cos(np.deg2rad(-rot)) -np.sin(np.deg2rad(-rot)) 0] [np.sin(np.deg2rad(-rot)) np.cos(np.deg2rad(-rot)) 0] [0 0 1]] dtype=np.float32)<line_sep># parameter smpl_pose=smpl_pose.reshape(-1 3)<line_sep>root_pose=smpl_pose[self.root_joint_idx :]<line_sep>root_pose,_=cv2.Rodrigues(root_pose)<line_sep>root_pose,_=cv2.Rodrigues(np.dot(rot_aug_mat root_pose))<line_sep>smpl_pose[self.root_joint_idx]=root_pose.reshape(3)<line_sep>smpl_pose=smpl_pose.reshape(-1)<line_sep># smpl coordinate smpl_joint_cam=smpl_joint_cam-smpl_joint_cam[self.root_joint_idx <none>]# root-relative smpl_joint_cam=np.dot(rot_aug_mat smpl_joint_cam.transpose(1 0)).transpose(1 
0)<line_sep>inputs={'img':img}<line_sep>targets={'orig_joint_img':coco_joint_img 'fit_joint_img':smpl_joint_img 'fit_mesh_img':smpl_mesh_img 'orig_joint_cam':coco_joint_cam 'fit_joint_cam':smpl_joint_cam 'pose_param':smpl_pose 'shape_param':smpl_shape}<line_sep>meta_info={'orig_joint_valid':coco_joint_valid 'orig_joint_trunc':coco_joint_trunc 'fit_joint_trunc':smpl_joint_trunc 'fit_mesh_trunc':smpl_mesh_trunc 'is_valid_fit':float(is_valid_fit) 'is_3D':float(<false>)}<line_sep><return>inputs targets meta_info<block_end><else_stmt><block_start>inputs={'img':img}<line_sep>targets={}<line_sep>meta_info={'bb2img_trans':bb2img_trans}<line_sep><return>inputs targets meta_info<block_end><block_end><def_stmt>evaluate self outs cur_sample_idx<block_start>annots=self.datalist<line_sep>sample_num=len(outs)<line_sep>eval_result={}<for_stmt>n range(sample_num)<block_start>annot=annots[cur_sample_idx+n]<line_sep>out=outs[n]<line_sep># x,y: resize to input image space and perform bbox to image affine transform bb2img_trans=out['bb2img_trans']<line_sep>mesh_out_img=out['mesh_coord_img']<line_sep>mesh_out_img[: 0]=mesh_out_img[: 0]/cfg.output_hm_shape[2]<times>cfg.input_img_shape[1]<line_sep>mesh_out_img[: 1]=mesh_out_img[: 1]/cfg.output_hm_shape[1]<times>cfg.input_img_shape[0]<line_sep>mesh_out_img_xy1=np.concatenate((mesh_out_img[: :2] np.ones_like(mesh_out_img[: :1])) 1)<line_sep>mesh_out_img[: :2]=np.dot(bb2img_trans mesh_out_img_xy1.transpose(1 0)).transpose(1 0)[: :2]<line_sep># z: devoxelize and translate to absolute depth root_joint_depth=annot['root_joint_depth']<line_sep>mesh_out_img[: 2]=(mesh_out_img[: 2]/cfg.output_hm_shape[0]<times>2.-1)<times>(cfg.bbox_3d_size<times>1000/2)# change cfg.bbox_3d_size from meter to milimeter mesh_out_img[: 2]=mesh_out_img[: 2]+root_joint_depth<line_sep># camera back-projection cam_param=annot['cam_param']<line_sep>focal,princpt=cam_param['focal'] cam_param['princpt']<line_sep>mesh_out_cam=pixel2cam(mesh_out_img focal princpt)<if_stmt>cfg.stage<eq>'param'<block_start>mesh_out_cam=out['mesh_coord_cam']<block_end>vis=<false><if_stmt>vis<block_start>filename=annot['img_path'].split('/')[-1][:-4]+'_'+str(n)<line_sep>img=load_img(annot['img_path'])[: : ::-1]<line_sep>img=vis_mesh(img mesh_out_img 0.5)<line_sep>cv2.imwrite(filename+'.jpg' img)<line_sep>save_obj(mesh_out_cam self.smpl.face filename+'.obj')<block_end><block_end><return>eval_result<block_end><def_stmt>print_eval_result self eval_result<block_start><pass><block_end><block_end>
# # Copyright (c) 2013-present, <NAME> # All rights reserved. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # <import_stmt>os<line_sep>""" Path to the Indic NLP Resources directory """<line_sep>INDIC_RESOURCES_PATH=''<def_stmt>init <block_start>""" Initialize the module. The following actions are performed: - Checks if the INDIC_RESOURCES_PATH variable is set. If not, checks if it can be initialized from the INDIC_RESOURCES_PATH environment variable. If that fails, an exception is raised """<line_sep><global>INDIC_RESOURCES_PATH<try_stmt><block_start><if_stmt>INDIC_RESOURCES_PATH<eq>''<block_start>INDIC_RESOURCES_PATH=os.environ['INDIC_RESOURCES_PATH']<block_end><block_end><except_stmt>Exception<as>e<block_start><raise>IndicNlpException('INDIC_RESOURCES_PATH not set')<block_end><if_stmt>INDIC_RESOURCES_PATH<eq>''<block_start><raise>IndicNlpException('INDIC_RESOURCES_PATH not set')<block_end><block_end><def_stmt>get_resources_path <block_start>""" Get the path to the Indic NLP Resources directory """<line_sep><return>INDIC_RESOURCES_PATH<block_end><def_stmt>set_resources_path resources_path<block_start>""" Set the path to the Indic NLP Resources directory """<line_sep><global>INDIC_RESOURCES_PATH<line_sep>INDIC_RESOURCES_PATH=resources_path<block_end><class_stmt>IndicNlpException(Exception)<block_start>""" Exceptions thrown by Indic NLP Library components are instances of this class. 'msg' attribute contains exception details. """<def_stmt>__init__ self msg<block_start>self.msg=msg<block_end><def_stmt>__str__ self<block_start><return>repr(self.msg)<block_end><block_end>
<import_from_stmt>collections defaultdict<import_stmt>datetime<import_stmt>logging<import_stmt>webapp2<import_from_stmt>google.appengine.api memcache<import_from_stmt>google.appengine.ext ndb<import_stmt>tba_config<import_from_stmt>base_controller CacheableHandler<import_from_stmt>consts.award_type AwardType<import_from_stmt>consts.event_type EventType<import_from_stmt>consts.landing_type LandingType<import_from_stmt>consts.media_tag MediaTag<import_from_stmt>consts.media_type MediaType<import_from_stmt>database media_query<import_from_stmt>helpers.event_helper EventHelper<import_from_stmt>helpers.season_helper SeasonHelper<import_from_stmt>helpers.team_helper TeamHelper<import_from_stmt>helpers.firebase.firebase_pusher FirebasePusher<import_from_stmt>models.award Award<import_from_stmt>models.event Event<import_from_stmt>models.insight Insight<import_from_stmt>models.media Media<import_from_stmt>models.team Team<import_from_stmt>models.sitevar Sitevar<import_from_stmt>template_engine jinja2_engine<def_stmt>render_static page<block_start>memcache_key="main_%s"%page<line_sep>html=memcache.get(memcache_key)<if_stmt>html<is><none><block_start>html=jinja2_engine.render('%s.html'%page {})<if_stmt>tba_config.CONFIG["memcache"]<block_start>memcache.set(memcache_key html 86400)<block_end><block_end><return>html<block_end><def_stmt>handle_404 request response exception<block_start>response.write(render_static("404"))<line_sep>response.set_status(404)<block_end><def_stmt>handle_500 request response exception<block_start>logging.exception(exception)<line_sep>response.write(render_static("500"))<line_sep>response.set_status(500)<block_end><class_stmt>AvatarsHandler(CacheableHandler)<block_start>CACHE_VERSION=0<line_sep>CACHE_KEY_FORMAT="avatars_{}"<def_stmt>__init__ self *args **kw<block_start>super(AvatarsHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<block_end><def_stmt>get self year<block_start>year=int(year)<if_stmt>year<not><in>{2018 2019 2020}<block_start>self.abort(404)<block_end>self._partial_cache_key=self.CACHE_KEY_FORMAT.format(year)<line_sep>super(AvatarsHandler self).get(year)<block_end><def_stmt>_render self year<block_start>year=int(year)<line_sep>avatars=[]<line_sep>shards=memcache.get_multi(['{}avatars_{}'.format(year i)<for>i xrange(10)])<if_stmt>len(shards)<eq>10# If missing a shard, must refetch all <block_start><for_stmt>_,shard sorted(shards.items() key=<lambda>kv:kv[0])<block_start>avatars<augadd>shard<block_end><block_end><if_stmt><not>avatars<block_start>avatars_future=Media.query(Media.media_type_enum<eq>MediaType.AVATAR Media.year<eq>year).fetch_async()<line_sep>avatars=sorted(avatars_future.get_result() key=<lambda>a:int(a.references[0].id()[3:]))<line_sep>shards={}<line_sep>size=len(avatars)/10+1<for_stmt>i xrange(10)<block_start>start=i<times>size<line_sep>end=start+size<line_sep>shards['{}avatars_{}'.format(year i)]=avatars[start:end]<block_end>memcache.set_multi(shards 60<times>60<times>24)<block_end>self.template_values.update({'year':year 'avatars':avatars })<line_sep><return>jinja2_engine.render('avatars.html' self.template_values)<block_end><block_end><class_stmt>TwoChampsHandler(CacheableHandler)<block_start>CACHE_VERSION=0<line_sep>CACHE_KEY_FORMAT="two_champs_{}_{}"<def_stmt>__init__ self *args **kw<block_start>super(TwoChampsHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<line_sep>self._team_key_a=self.request.get('team_a' <none>)<line_sep>self._team_key_b=self.request.get('team_b' 
<none>)<line_sep>self._partial_cache_key=self.CACHE_KEY_FORMAT.format(self._team_key_a self._team_key_b)<block_end><def_stmt>_render self *args **kw<block_start>team_a=Team.get_by_id(self._team_key_a)<if>self._team_key_a<else><none><line_sep>team_b=Team.get_by_id(self._team_key_b)<if>self._team_key_b<else><none><line_sep>self.template_values.update({'team_a':team_a 'team_b':team_b })<line_sep><return>jinja2_engine.render('2champs.html' self.template_values)<block_end><block_end><class_stmt>ContactHandler(CacheableHandler)<block_start>CACHE_VERSION=1<line_sep>CACHE_KEY_FORMAT="main_contact"<def_stmt>__init__ self *args **kw<block_start>super(ContactHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<times>7<block_end><def_stmt>_render self *args **kw<block_start><return>jinja2_engine.render('contact.html' self.template_values)<block_end><block_end><class_stmt>PrivacyHandler(CacheableHandler)<block_start>CACHE_VERSION=1<line_sep>CACHE_KEY_FORMAT="main_privacy"<def_stmt>__init__ self *args **kw<block_start>super(PrivacyHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<times>7<block_end><def_stmt>_render self *args **kw<block_start><return>jinja2_engine.render('privacy.html' self.template_values)<block_end><block_end><class_stmt>HashtagsHandler(CacheableHandler)<block_start>CACHE_VERSION=1<line_sep>CACHE_KEY_FORMAT="main_hashtags"<def_stmt>__init__ self *args **kw<block_start>super(HashtagsHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<times>7<block_end><def_stmt>_render self *args **kw<block_start><return>jinja2_engine.render('hashtags.html' self.template_values)<block_end><block_end><class_stmt>FIRSTHOFHandler(CacheableHandler)<block_start>CACHE_VERSION=0<line_sep>CACHE_KEY_FORMAT="main_first_hof"<def_stmt>__init__ self *args **kw<block_start>super(FIRSTHOFHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<times>7<block_end><def_stmt>_render self *args **kw<block_start>awards_future=Award.query(Award.award_type_enum<eq>AwardType.CHAIRMANS Award.event_type_enum<eq>EventType.CMP_FINALS).fetch_async()<line_sep>teams_by_year=defaultdict(list)<for_stmt>award awards_future.get_result()<block_start><for_stmt>team_key award.team_list<block_start>teams_by_year[award.year].append((team_key.get_async() award.event.get_async() award media_query.TeamTagMediasQuery(team_key.id() MediaTag.CHAIRMANS_VIDEO).fetch_async() media_query.TeamTagMediasQuery(team_key.id() MediaTag.CHAIRMANS_PRESENTATION).fetch_async() media_query.TeamTagMediasQuery(team_key.id() MediaTag.CHAIRMANS_ESSAY).fetch_async() ))<block_end><block_end>teams_by_year=sorted(teams_by_year.items() key=<lambda>(k v):-k)<for_stmt>_,tea teams_by_year<block_start>tea.sort(key=<lambda>x:x[1].get_result().start_date)<block_end>self.template_values.update({'teams_by_year':teams_by_year })<line_sep><return>jinja2_engine.render('hof.html' self.template_values)<block_end><block_end><class_stmt>ThanksHandler(CacheableHandler)<block_start>CACHE_VERSION=1<line_sep>CACHE_KEY_FORMAT="main_thanks"<def_stmt>__init__ self *args **kw<block_start>super(ThanksHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<times>7<block_end><def_stmt>_render self *args **kw<block_start><return>jinja2_engine.render('thanks.html' self.template_values)<block_end><block_end><class_stmt>OprHandler(CacheableHandler)<block_start>CACHE_VERSION=1<line_sep>CACHE_KEY_FORMAT="main_opr"<def_stmt>__init__ 
self *args **kw<block_start>super(OprHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<times>7<block_end><def_stmt>_render self *args **kw<block_start><return>jinja2_engine.render('opr.html' self.template_values)<block_end><block_end><class_stmt>PredictionsHandler(CacheableHandler)<block_start>CACHE_VERSION=0<line_sep>CACHE_KEY_FORMAT="main_predictions"<def_stmt>__init__ self *args **kw<block_start>super(PredictionsHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<times>7<block_end><def_stmt>_render self *args **kw<block_start><return>jinja2_engine.render('predictions.html' self.template_values)<block_end><block_end><class_stmt>SearchHandler(webapp2.RequestHandler)<block_start><def_stmt>get self<block_start><try_stmt><block_start>q=self.request.get("q")<line_sep>logging.info("search query: %s"%q)<if_stmt>q.isdigit()<block_start>team_id="frc%s"%int(q)<line_sep>team=Team.get_by_id(team_id)<if_stmt>team<block_start>self.redirect(team.details_url)<line_sep><return><none><block_end><block_end><elif_stmt>q[:4].isdigit()# Check for event key <block_start>event=Event.get_by_id(q)<if_stmt>event<block_start>self.redirect(event.details_url)<line_sep><return><none><block_end><block_end><else_stmt># Check for event short <block_start>year=datetime.datetime.now().year# default to current year event=Event.get_by_id('{}{}'.format(year q))<if_stmt>event<block_start>self.redirect(event.details_url)<line_sep><return><none><block_end><block_end><block_end><except_stmt>Exception e<block_start>logging.warning("warning: %s"%e)<block_end><finally_stmt><block_start>self.response.out.write(render_static("search"))<block_end><block_end><block_end><class_stmt>WebcastsHandler(CacheableHandler)<block_start>CACHE_VERSION=2<line_sep>CACHE_KEY_FORMAT="main_webcasts"<def_stmt>__init__ self *args **kw<block_start>super(WebcastsHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<times>7<block_end><def_stmt>_render self *args **kw<block_start>year=datetime.datetime.now().year<line_sep>event_keys=Event.query(Event.year<eq>year).order(Event.start_date).fetch(500 keys_only=<true>)<line_sep>events=ndb.get_multi(event_keys)<line_sep>self.template_values.update({'events':events 'year':year })<line_sep><return>jinja2_engine.render('webcasts.html' self.template_values)<block_end><block_end><class_stmt>ApiWriteHandler(CacheableHandler)<block_start>CACHE_VERSION=1<line_sep>CACHE_KEY_FORMAT="api_write"<def_stmt>__init__ self *args **kw<block_start>super(ApiWriteHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<times>7<block_end><def_stmt>_render self *args **kw<block_start><return>jinja2_engine.render('apiwrite.html' self.template_values)<block_end><block_end><class_stmt>BrandHandler(CacheableHandler)<block_start>CACHE_VERSION=1<line_sep>CACHE_KEY_FORMAT="main_brand"<def_stmt>__init__ self *args **kw<block_start>super(BrandHandler self).__init__(*args **kw)<line_sep>self._cache_expiration=60<times>60<times>24<times>7<block_end><def_stmt>_render self *args **kw<block_start><return>jinja2_engine.render('brand.html' self.template_values)<block_end><block_end>
<import_from_stmt>abc abstractmethod ABCMeta<class_stmt>ArchiveName(metaclass=ABCMeta)<block_start>_vol_fill=<false><line_sep>@abstractmethod<def_stmt>get_chapter_index self<block_start><pass><block_end><def_stmt>get_archive_name self<arrow>str<block_start>idx=self.get_chapter_index()<line_sep>self._vol_fill=<true><line_sep><return>self.normal_arc_name({'vol':idx.split('-')})<block_end><def_stmt>normal_arc_name self idx<arrow>str<block_start><if_stmt>isinstance(idx (str int))<block_start>idx=[idx]<block_end><if_stmt>isinstance(idx list)<block_start>self._vol_fill=<true><line_sep><return>self.__normal_name_list(idx)<block_end><if_stmt>isinstance(idx dict)<block_start><return>self.__normal_name_dict(idx)<block_end><raise>DeprecationWarning('Wrong arc name type: %s'%type(idx))<block_end><def_stmt>__normal_name_dict self idx:dict<arrow>str<block_start>vol=idx.get('vol' <none>)<line_sep>ch=idx.get('ch' <none>)<line_sep>result=''<if_stmt>vol<block_start><if_stmt>isinstance(vol str)<block_start>vol=[vol]<block_end>result=self.__normal_name_list(vol)<block_end><if_stmt>ch<block_start><if_stmt>vol<block_start>result<augadd>'-'<block_end>result<augadd>'ch_'+self.__fill(ch)<block_end><if_stmt>self._with_manga_name<block_start>name=self._params.get('name' '')<if_stmt><not>len(name)<block_start>name=self.manga_name<block_end>result='%s-%s'%(name result)<block_end><return>result<block_end><def_stmt>__normal_name_list self idx:list<arrow>str<block_start>fmt='vol_{:0>3}'<if_stmt>len(idx)<g>1<block_start>fmt<augadd>'-{}'<times>(len(idx)-1)<block_end><elif_stmt>self._vol_fill<and>self._zero_fill<block_start>idx.append('0')<line_sep>fmt<augadd>'-{}'<block_end><return>fmt.format(*idx)<block_end>@staticmethod<def_stmt>__fill var fmt:str='-{}'<arrow>str<block_start><if_stmt>isinstance(var str)<block_start>var=[var]<block_end><return>(fmt<times>len(var)).format(*var).lstrip('-')<block_end><block_end>
<import_from_stmt>.home HomeView timeline<line_sep>__all__=["HomeView" "timeline"]<line_sep>
<import_stmt>numbers<import_stmt>re<import_from_stmt>normality stringify<import_from_stmt>dateutil.parser parse<class_stmt>ContextCheck(object)<block_start><def_stmt>__init__ self context<block_start>self.context=context<block_end><def_stmt>shout self msg strict=<false> *args<block_start><if_stmt>strict<block_start><raise>ValueError(msg%args)<block_end><else_stmt><block_start>self.context.log.info(msg *args)<block_end><block_end><def_stmt>is_not_empty self value strict=<false><block_start>"""if value is not empty"""<line_sep>value=stringify(value)<if_stmt>value<is><not><none><block_start><return><block_end>self.shout("Value %r is empty" strict value)<block_end><def_stmt>is_numeric self value strict=<false><block_start>"""if value is numeric"""<line_sep>value=stringify(value)<if_stmt>value<is><not><none><block_start><if_stmt>value.isnumeric()<block_start><return><block_end><block_end>self.shout("value %r is not numeric" strict value)<block_end><def_stmt>is_integer self value strict=<false><block_start>"""if value is an integer"""<if_stmt>value<is><not><none><block_start><if_stmt>isinstance(value numbers.Number)<block_start><return><block_end><block_end>value=stringify(value)<if_stmt>value<is><not><none><and>value.isnumeric()<block_start><return><block_end>self.shout("value %r is not an integer" strict value)<block_end><def_stmt>match_date self value strict=<false><block_start>"""if value is a date"""<line_sep>value=stringify(value)<try_stmt><block_start>parse(value)<block_end><except_stmt>Exception<block_start>self.shout("Value %r is not a valid date" strict value)<block_end><block_end><def_stmt>match_regexp self value q strict=<false><block_start>"""if value matches a regexp q"""<line_sep>value=stringify(value)<line_sep>mr=re.compile(q)<if_stmt>value<is><not><none><block_start><if_stmt>mr.match(value)<block_start><return><block_end><block_end>self.shout("%r not matching the regexp %r" strict value q)<block_end><def_stmt>has_length self value q strict=<false><block_start>"""if value has a length of q"""<line_sep>value=stringify(value)<if_stmt>value<is><not><none><block_start><if_stmt>len(value)<eq>q<block_start><return><block_end><block_end>self.shout("Value %r not matching length %r" strict value q)<block_end><def_stmt>must_contain self value q strict=<false><block_start>"""if value must contain q"""<if_stmt>value<is><not><none><block_start><if_stmt>value.find(q)<ne>-1<block_start><return><block_end><block_end>self.shout("Value %r does not contain %r" strict value q)<block_end><block_end>
# Copyright (c) 2020 Graphcore Ltd. All rights reserved <import_from_stmt>popgen NonTensorValue Value onnx poptorch<import_from_stmt>popgen.helpers empty_initializer<line_sep># no_tensor_braces(v): # # Modifiers for values that take tensors without initializer list braces # Parameters: # v - the input value <def_stmt>no_tensor_braces v<block_start>v.tensor_braces=<false><line_sep><return>v<block_end># def check_operator_signature(value, signatures) # # Verify an operator has correct signature # Parameters: # value - the operator # signatures - signatures' dictionary <def_stmt>check_operator_signature value signatures<block_start><assert_stmt>value.op<in>signatures str(value.op)+" is not a supported operator"<line_sep>actual_args=value.args<line_sep>expected_args=signatures[value.op]<line_sep># check non-tensor arguments first_non_tensor=-1<if_stmt>expected_args[0]<eq>'Args'<block_start><for_stmt>i,arg enumerate(actual_args)<block_start><if_stmt>arg.op<eq>'empty_initializer'<block_start><continue><block_end><if_stmt>isinstance(arg NonTensorValue)<block_start>first_non_tensor=i<line_sep><break><block_end><block_end><assert_stmt>first_non_tensor<ne>0 'Expecting at least 1 tensor '+'argument for '+value.op<block_end># no non-tensor arguments <if_stmt>first_non_tensor<eq>-1<block_start><return>value<block_end># check non-tensor arguments expected_args=expected_args[1:]<line_sep>actual_args=actual_args[first_non_tensor:]<line_sep># assume any missing arguments are optional <for_stmt>i range(1 len(expected_args)-len(actual_args))<block_start>actual_args.append('None')<block_end><for_stmt>i,arg enumerate(actual_args)<block_start><if_stmt>isinstance(arg Value)<block_start>arg=arg.op<block_end><assert_stmt>arg<in>expected_args[i] 'Incorrect operand '+str(i)+'for '+value.op+'. Got '+arg+' , expecting '+'one of: '+str(expected_args[i])<block_end><return>value<block_end># Factory class for creating popArt ops. Operators are created # on the fly based on spelling of attributes. 
<class_stmt>OperatorFactory<block_start><def_stmt>__getattr__ self name<block_start><if_stmt>name<in>onnx.signatures<block_start><return><lambda>*args:check_operator_signature(Value(name list(args)) onnx.signatures)<block_end><if_stmt>name<in>poptorch.signatures<block_start><return><lambda>*args:check_operator_signature(Value(name list(args)) poptorch.signatures)<block_end><raise>ValueError(name+" is not a supported operator")<block_end><def_stmt>cast self t ty<block_start>value=no_tensor_braces(Value('cast' [t ty]))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>internalCast self t ty<block_start>value=no_tensor_braces(Value('internalCast' [t ty]))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>constantPad self x l c<block_start>value=no_tensor_braces(Value('constantPad' [x l c]))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>edgePad self t l<block_start>value=no_tensor_braces(Value('edgePad' [t l]))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>printIpuTensor self t s<block_start>value=no_tensor_braces(Value('printIpuTensor' [t s]))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>callCpuOp self t s n<block_start>value=no_tensor_braces(Value('callCpuOp' [t s n]))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>transpose self t<block_start>value=Value('transpose' [t empty_initializer()])<line_sep>check_operator_signature(value onnx.signatures)<line_sep><return>value<block_end><def_stmt>randomNormal self x shape high low scalar_type=<none><block_start>args=[x shape high low]<if_stmt>scalar_type<is><not><none><block_start>args<augadd>[scalar_type]<block_end>value=Value('randomNormal' args)<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>randomUniform self x shape high low scalar_type=<none><block_start>args=[x shape high low]<if_stmt>scalar_type<is><not><none><block_start>args<augadd>[scalar_type]<block_end>value=no_tensor_braces(Value('randomUniform' args))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>recomputationCheckpoint self x<block_start>value=no_tensor_braces(Value('recomputationCheckpoint' [x]))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>reflectionPad self t l<block_start>value=no_tensor_braces(Value('reflectionPad' [t l]))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>setAvailableMemory self x y<block_start>value=no_tensor_braces(Value('setAvailableMemory' [x y]))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>setMatMulSerialization self x s a b<block_start>value=no_tensor_braces(Value('setMatMulSerialization' [x s a b]))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><def_stmt>endForLoop self output inputs trip_count<block_start>value=no_tensor_braces(Value('endForLoop' [output inputs trip_count]))<line_sep>check_operator_signature(value poptorch.signatures)<line_sep><return>value<block_end><block_end>op=OperatorFactory()<line_sep>
<import_from_stmt>typing List<import_from_stmt>typing Dict<import_from_stmt>datetime datetime<import_from_stmt>botocore.paginate Paginator<class_stmt>BatchGetTraces(Paginator)<block_start><def_stmt>paginate self TraceIds:List PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end><class_stmt>GetGroups(Paginator)<block_start><def_stmt>paginate self PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end><class_stmt>GetSamplingRules(Paginator)<block_start><def_stmt>paginate self PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end><class_stmt>GetSamplingStatisticSummaries(Paginator)<block_start><def_stmt>paginate self PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end><class_stmt>GetServiceGraph(Paginator)<block_start><def_stmt>paginate self StartTime:datetime EndTime:datetime GroupName:str=<none> GroupARN:str=<none> PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end><class_stmt>GetTimeSeriesServiceStatistics(Paginator)<block_start><def_stmt>paginate self StartTime:datetime EndTime:datetime GroupName:str=<none> GroupARN:str=<none> EntitySelectorExpression:str=<none> Period:int=<none> PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end><class_stmt>GetTraceGraph(Paginator)<block_start><def_stmt>paginate self TraceIds:List PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end><class_stmt>GetTraceSummaries(Paginator)<block_start><def_stmt>paginate self StartTime:datetime EndTime:datetime TimeRangeType:str=<none> Sampling:bool=<none> SamplingStrategy:Dict=<none> FilterExpression:str=<none> PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end>
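# --- Usage sketch (editor's addition) ---
# These stubs mirror the real botocore paginators; with boto3 and AWS credentials available,
# the GetTraceSummaries paginator is driven like this.
from datetime import datetime, timedelta
import boto3

xray = boto3.client("xray")
paginator = xray.get_paginator("get_trace_summaries")
pages = paginator.paginate(
    StartTime=datetime.utcnow() - timedelta(minutes=15),
    EndTime=datetime.utcnow(),
    PaginationConfig={"MaxItems": 100},
)
for page in pages:
    for summary in page["TraceSummaries"]:
        print(summary["Id"])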
<import_from_future_stmt> absolute_import<import_from_future_stmt> print_function<import_stmt>veriloggen.core.vtypes<as>vtypes<import_stmt>veriloggen.core.module<as>module<def_stmt>mkMultiplierCore index lwidth=32 rwidth=32 lsigned=<true> rsigned=<true> depth=6<block_start><if_stmt>lwidth<le>0<block_start><raise>ValueError("data width must be greater than 0.")<block_end><if_stmt>rwidth<le>0<block_start><raise>ValueError("data width must be greater than 0.")<block_end><if_stmt>depth<l>2<block_start><raise>ValueError("depth must be greater than 1.")<block_end>retwidth=lwidth+rwidth<line_sep>m=module.Module('multiplier_core_%d'%index)<line_sep>clk=m.Input('CLK')<line_sep>update=m.Input('update')<line_sep>a=m.Input('a' lwidth)<line_sep>b=m.Input('b' rwidth)<line_sep>c=m.Output('c' retwidth)<line_sep>_a=m.Reg('_a' lwidth signed=lsigned)<line_sep>_b=m.Reg('_b' rwidth signed=rsigned)<line_sep>_mul=m.Wire('_mul' retwidth signed=<true>)<line_sep>_pipe_mul=[m.Reg('_pipe_mul%d'%i retwidth signed=<true>)<for>i range(depth-1)]<line_sep>__a=_a<line_sep>__b=_b<if_stmt><not>lsigned<block_start>__a=vtypes.SystemTask('signed' vtypes.Cat(vtypes.Int(0 width=1) _a))<block_end><if_stmt><not>rsigned<block_start>__b=vtypes.SystemTask('signed' vtypes.Cat(vtypes.Int(0 width=1) _b))<block_end>m.Assign(_mul(__a<times>__b))<line_sep>m.Assign(c(_pipe_mul[depth-2]))<line_sep>m.Always(vtypes.Posedge(clk))(vtypes.If(update)(_a(a) _b(b) _pipe_mul[0](_mul) [_pipe_mul[i](_pipe_mul[i-1])<for>i range(1 depth-1)]))<line_sep><return>m<block_end><def_stmt>mkMultiplier index lwidth=32 rwidth=32 lsigned=<true> rsigned=<true> depth=6<block_start><if_stmt>lwidth<le>0<block_start><raise>ValueError("data width must be greater than 0.")<block_end><if_stmt>rwidth<le>0<block_start><raise>ValueError("data width must be greater than 0.")<block_end><if_stmt>depth<l>2<block_start><raise>ValueError("depth must be greater than 1.")<block_end>retwidth=lwidth+rwidth<line_sep>mult=mkMultiplierCore(index lwidth rwidth lsigned rsigned depth)<line_sep>m=module.Module('multiplier_%d'%index)<line_sep>clk=m.Input('CLK')<line_sep>rst=m.Input('RST')<line_sep>update=m.Input('update')<line_sep>enable=m.Input('enable')<line_sep>valid=m.Output('valid')<line_sep>a=m.Input('a' lwidth)<line_sep>b=m.Input('b' rwidth)<line_sep>c=m.Output('c' retwidth)<line_sep>valid_reg=[m.Reg('valid_reg%d'%i)<for>i range(depth)]<line_sep>m.Assign(valid(valid_reg[depth-1]))<line_sep>m.Always(vtypes.Posedge(clk))(vtypes.If(rst)([valid_reg[i](0)<for>i range(depth)]).Else(vtypes.If(update)(valid_reg[0](enable) [valid_reg[i](valid_reg[i-1])<for>i range(1 depth)])))<line_sep>ports=[('CLK' clk) ('update' update) ('a' a) ('b' b) ('c' c)]<line_sep>m.Instance(mult 'mult' ports=ports)<line_sep><return>m<block_end># global multiplier count index_count=0<def_stmt>get_mul lwidth=32 rwidth=32 lsigned=<true> rsigned=<true> depth=6<block_start><global>index_count<line_sep>mul=mkMultiplier(index_count lwidth rwidth lsigned rsigned depth)<line_sep>index_count<augadd>1<line_sep><return>mul<block_end><def_stmt>reset <block_start><global>index_count<line_sep>index_count=0<block_end>
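# --- Usage sketch (editor's addition, assumes veriloggen is installed) ---
# Build a 16x16, 6-stage pipelined multiplier and dump the generated Verilog.
if __name__ == '__main__':
    mul = get_mul(lwidth=16, rwidth=16, depth=6)
    print(mul.to_verilog())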
<import_from_stmt>typing List Optional Union<import_from_stmt>fedot.core.dag.node_operator NodeOperator<class_stmt>GraphNode<block_start>""" Class for node definition in the DAG-based structure :param nodes_from: parent nodes which information comes from :param content: dict for the content in node The possible parameters are: 'name' - name (str) or object that performs actions in this node 'params' - dictionary with additional information that is used by the object in the 'name' field (e.g. hyperparameters values). """<def_stmt>__init__ self content:Union[dict str] nodes_from:Optional[List['GraphNode']]=<none><block_start>self.nodes_from=nodes_from<line_sep># Wrap string into dict if it is necessary <if_stmt>isinstance(content str)<block_start>content={'name':content}<block_end>self.content=content<line_sep>self._operator=NodeOperator(self)<block_end><def_stmt>__str__ self<block_start><return>str(self.content['name'])<block_end><def_stmt>__repr__ self<block_start><return>self.__str__()<block_end>@property<def_stmt>descriptive_id self<block_start><return>self._operator.descriptive_id()<block_end><def_stmt>ordered_subnodes_hierarchy self visited=<none><arrow>List['GraphNode']<block_start><return>self._operator.ordered_subnodes_hierarchy(visited)<block_end>@property<def_stmt>distance_to_primary_level self<block_start><return>self._operator.distance_to_primary_level()<block_end><block_end>
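# --- Minimal illustration (editor's addition; the operation names below are placeholders) ---
scaling = GraphNode('scaling')
ridge = GraphNode('ridge', nodes_from=[scaling])
print(ridge)                 # -> ridge
print(ridge.nodes_from)      # -> [scaling]
print(ridge.descriptive_id)  # id assembled by NodeOperator from the parent chain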
<import_from_stmt>fastapi FastAPI<import_from_stmt>fastapi.routing APIRoute<line_sep>app=FastAPI()<line_sep>@app.get("/items/")<async_keyword><def_stmt>read_items <block_start><return>[{"item_id":"Foo"}]<block_end><def_stmt>use_route_names_as_operation_ids app:FastAPI<arrow><none><block_start>""" Simplify operation IDs so that generated API clients have simpler function names. Should be called only after all routes have been added. """<for_stmt>route app.routes<block_start><if_stmt>isinstance(route APIRoute)<block_start>route.operation_id=route.name<block_end><block_end><block_end># in this case, 'read_items' use_route_names_as_operation_ids(app)<line_sep>
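# --- Quick check (editor's addition, uses FastAPI's bundled TestClient) ---
# After the renaming, the OpenAPI schema exposes the route under its function name.
from fastapi.testclient import TestClient

client = TestClient(app)
openapi = client.get("/openapi.json").json()
assert openapi["paths"]["/items/"]["get"]["operationId"] == "read_items"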
<import_stmt>os sys<import_stmt>argparse<import_stmt>random<import_stmt>time pytz<import_from_stmt>datetime datetime timezone<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>h5py<import_from_stmt>torch.utils.data Dataset<import_from_stmt>torch.utils.data DataLoader<line_sep>#from text_utils import TextEncoder # This is my version sys.path.append('./pytorch-openai-transformer-lm')<import_from_stmt>model_pytorch TransformerModel load_openai_pretrained_model DEFAULT_CONFIG<import_from_stmt>model_pytorch Conv1D Block<import_from_stmt>opt OpenAIAdam<import_from_stmt>utils ResultLogger<line_sep>pretrained_model_path=os.path.join('.' 'finetune-transformer-lm' 'model')<line_sep># TODO : Fn to get list of relationship_types and relationship_templates for each type # Props to : https://github.com/rasbt/deep-learning-book/blob/master/code/model_zoo/pytorch_ipynb/custom-data-loader-csv.ipynb <class_stmt>Hdf5Dataset(Dataset)<block_start>"""Custom Dataset for loading entries from HDF5 databases"""<def_stmt>__init__ self h5_path vocab_count valid_indices=<none># transform=None, <block_start>self.h5f=h5py.File(h5_path 'r')<line_sep>features=self.h5f['features']<line_sep>self.valid_indices=valid_indices<if_stmt>valid_indices<is><none><block_start>self.num_entries=features.shape[0]<block_end><else_stmt><block_start>self.num_entries=len(valid_indices)<block_end>#self.transform = transform self.n_ctx=features.shape[1]<line_sep>self.postitional_encoder=np.arange(vocab_count vocab_count+self.n_ctx)<block_end><def_stmt>__getitem__ self index<block_start><if_stmt>self.valid_indices<is><not><none># find on-disk index <block_start>index=self.valid_indices[index]<block_end>features=self.h5f['features'][index]<line_sep>labels=self.h5f['labels'][index].astype(np.int64)<line_sep>deps=self.h5f['deps'][index].astype(np.int64)<line_sep># Find the token_clf #token_clf_pos = np.nonzero( features==token_clf )[-1].sum() # This is zero if it is not found #if token_clf_pos>=features.shape[0]-1: # #print("token_clf_pos right at end, index=", index, token_clf_pos, features.shape[0]-1) # token_clf_pos=features.shape[0]-2 # Need to have this location, and the next one #if self.transform is not None: # features = self.transform(features) #xmb[:, :, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx) #xmb[:, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx) # This is a single row, of batch=1 #features_with_positions = np.stack( [ features, self.postitional_encoder ], axis=1 ) features_with_positions=np.stack([features self.postitional_encoder.copy()] axis=1)# May be safer when multithreaded? 
#print(features.shape, features_with_positions.shape) # (128,) (128, 2) #unanswerable=False #if 3 not in list(labels): # There is no answer to this question # unanswerable=True #if 4 not in list(labels): # There is no answer to this question # unanswerable=True #print(token_clf_pos, unanswerable) #if unanswerable: # if False: # labels[0]=4 # end is before start # labels[1]=3 # if True: # labels[token_clf_pos ] = 4 # end is before start # labels[token_clf_pos+1] = 3 # https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.clip.html np.clip(deps 0 self.n_ctx-1 out=deps)<line_sep><return>features_with_positions labels deps<block_end><def_stmt>__len__ self<block_start><return>self.num_entries<block_end><def_stmt>close self<block_start>self.h5f.close()<block_end><block_end><class_stmt>StepwiseClassifierModel(nn.Module)<block_start>""" Transformer with stepwise classifier(s) """<def_stmt>__init__ self cfg n_classifier=<none> one_hot=<true> vocab_count=<none> n_ctx=128 extra_blocks=1# 40990 <block_start>super(StepwiseClassifierModel self).__init__()<line_sep>self.n_embd=cfg.n_embd<line_sep>self.n_ctx=n_ctx<line_sep>self.n_classifier=n_classifier<line_sep>self.extra_blocks=extra_blocks<line_sep>self.transformer=TransformerModel(cfg vocab=vocab_count+n_ctx n_ctx=n_ctx)<line_sep>#block = Block(n_ctx, cfg, scale=True) #self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(cfg.n_layer)]) ## Add the attention pointer idea <if_stmt>extra_blocks<eq>1# Just set to ==1 for now # First : Add an additional transformer layer <block_start>self.full_block=Block(n_ctx cfg scale=<true>)<line_sep># BBBUUUTTT :: force it into full-attentional mode :: #self.full_block.attn.register_buffer('b', torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)) self.full_block.attn.register_buffer('b' (torch.ones(n_ctx n_ctx)).view(1 1 n_ctx n_ctx))<block_end>self.stepwise_dropout=nn.Dropout(cfg.clf_pdrop)<line_sep>self.stepwise_classifier=Conv1D(n_classifier 1 self.n_embd)<line_sep>self.attn_dropout=nn.Dropout(cfg.attn_pdrop)<line_sep>self.c_attn=Conv1D(self.n_embd<times>2 1 self.n_embd)<block_end><def_stmt>forward self x# x is the input text ## NO : x ~ np.zeros((n_batch, 2, n_ctx, 2), dtype=np.int32) # This is for their 0 vs 1 model # x ~ np.zeros((n_batch, n_ctx, 2), dtype=np.int32) # This is more normal use-case # x[..., -1] is for [input_sequence, positions] <block_start>h=self.transformer(x)# These are the transformers embeddings (n_batch, n_ctx, n_embd) <if_stmt>self.extra_blocks<eq>1# This can look forwards too <block_start>h=self.full_block(h)<block_end># for classification step-wise h_stepwise_input=self.stepwise_dropout(h)<line_sep>task_logits=self.stepwise_classifier(h_stepwise_input).permute(0 2 1)# CrossEntropy expects classifier to be in second position #print("task_logits.size()=", task_logits.size() ) # task_logits.size()= torch.Size([8, 5, 128]) (n_batch, n_classifier, n_ctx) # ~ Attention.forward h_attn_input=self.stepwise_dropout(h)<line_sep>attn=self.c_attn(h_attn_input)<line_sep># reshape for query and key query,key=attn.split(self.n_embd dim=2)<line_sep># ~ Attention.split_heads(self, x, k=False): #new_h_shape = h.size()[:-1] + (1 , h.size(-1)) # Insert an extra dimension #query = query.view(*new_h_shape).permute(0, 2, 1, 3) #key = key.view( *new_h_shape).permute(0, 2, 3, 1) #query = query.view(*new_h_shape).permute(0, 1, 3) # Above can be simplified, since we don't need to get too fancy... 
key=key.permute(0 2 1)<line_sep>#print( "query.size()=", query.size()) # query.size()= torch.Size([8, 128, 768]) = batch, time_step, matcher #print( "key.size()=", key.size()) # key.size()= torch.Size([8, 768, 128]) = batch, matcher, time_step # ~ Attention._attn(self, q, k, v): w=torch.matmul(query key)<if_stmt><true># self.scale: <block_start>w=w/np.sqrt(self.n_embd)<block_end># simple scaling, since we're adding up a dot product # Now, we have a weighting matrix (logits) over the different locations #w = nn.Softmax(dim=-1)(w) # Don't do this here, since we use pure logits with the loss_fn #print("w.size()=", w.size()) # w.size()= torch.Size([8, 128, 128]) ( thinking about it : batch, time_step, position_score ) attn_logits=w.permute(0 2 1)# CrossEntropy expects classifier to be in second position ( batch, position_score, time_step ) <return>task_logits attn_logits<block_end><block_end><def_stmt>run_predictions test_loader=<none> output_file=<none><block_start>print("run_predictions() -> %s"%(output_file ))<line_sep>model_stepwise.eval()<line_sep>labels_arr,deps_arr=[] []<for_stmt>idx,(features labels deps) enumerate(test_loader)#features, labels, deps = features.to(device), labels.to(device), deps.to(device) <block_start>features=features.to(device)<line_sep>out_class_logits,out_deps_logits=model_stepwise(features)<line_sep># Ok, so now what... # Just save off the argmax(out_class_logits) and argmax(out_deps_logits) _,labels_predicted=torch.max(out_class_logits 1)<line_sep>_,deps_predicted=torch.max(out_deps_logits 1)<line_sep># print( labels_predicted.shape, deps_predicted.shape ) # on P100s : torch.Size([512, 32]) torch.Size([512, 32]) labels_arr.append(labels_predicted.detach().cpu().numpy().astype(np.uint8))<line_sep>deps_arr.append(deps_predicted.detach().cpu().numpy().astype(np.uint8))<if_stmt>(idx+1)%10<eq>0<block_start>print('%.1f%% of predictions'%(idx/float(len(test_loader))<times>100 ) end='\r')<line_sep>#break <block_end><block_end>#np.savez(output_file, labels=np.array( labels_arr ), deps=np.array( deps_arr ), ) np.savez(output_file labels=np.vstack(labels_arr) deps=np.vstack(deps_arr) )<line_sep>""" import numpy as np a=np.array([[1,2,3],[4,5,6]]) b=np.array([[7,6,5],[4,8,6]]) np.vstack([a,b]) #array([[1, 2, 3], # [4, 5, 6], # [7, 6, 5], # [4, 8, 6]]) """<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--checkpoint" default=<none> type=str help="model checkpoint path to restart training")<line_sep>parser.add_argument('--path' type=str default='./bist-parser/preprocess/output')<line_sep>parser.add_argument('--stub' type=str default='all' help="Description")<line_sep>parser.add_argument('--log_dir' type=str default='log/')<line_sep>parser.add_argument('--seed' type=int default=42)<line_sep>parser.add_argument('--max_grad_norm' type=int default=1)<line_sep>parser.add_argument('--l2' type=float default=0.01)<line_sep>parser.add_argument('--vector_l2' action='store_true')<line_sep>parser.add_argument('--opt' type=str default='adam')<line_sep>parser.add_argument('--lr' type=float default=6.25e-5)<line_sep>parser.add_argument('--lr_warmup' type=float default=0.002)<line_sep>parser.add_argument('--lr_schedule' type=str default='warmup_linear')<line_sep>parser.add_argument('--b1' type=float default=0.9)<line_sep>parser.add_argument('--b2' type=float default=0.999)<line_sep>parser.add_argument('--e' type=float default=1e-8)<line_sep>parser.add_argument('--n_transfer' type=int 
default=12)<line_sep>parser.add_argument('--lm_coef' type=float default=0.5)<line_sep>#parser.add_argument('--n_valid', type=int, default=374) # Standard for pre-trained model START parser.add_argument('--n_embd' type=int default=768)# This is the internal feature width parser.add_argument('--n_head' type=int default=12)<line_sep>parser.add_argument('--n_layer' type=int default=12)<line_sep>parser.add_argument('--embd_pdrop' type=float default=0.1)<line_sep>parser.add_argument('--attn_pdrop' type=float default=0.1)<line_sep>parser.add_argument('--resid_pdrop' type=float default=0.1)<line_sep>parser.add_argument('--clf_pdrop' type=float default=0.1)<line_sep>parser.add_argument('--afn' type=str default='gelu')<line_sep># Standard for pre-trained model END parser.add_argument('--encoder_path' type=str default=pretrained_model_path+'/encoder_bpe_40000.json')<line_sep>parser.add_argument('--bpe_path' type=str default=pretrained_model_path+'/vocab_40000.bpe')<line_sep>parser.add_argument('--relation_hdf5' type=str default='coco_train.conll_v32.hdf5')<line_sep>parser.add_argument('--tokens_special' type=int default=3)# Printed out by relation_split_to_hdf5 parser.add_argument('--token_clf' type=int default=40480)# Printed out by relation_split_to_hdf5 parser.add_argument('--vocab_count' type=int default=40481)# Printed out by relation_split_to_hdf5 #parser.add_argument('--n_ctx', type=int, default=32) # Max length of input texts in bpes - get this from input hdf5 shapes # class : 0=IGNORE, 1=same, 2=SUBJECT-OBJECT, 3=VERB'S-OBJECT, 4=ATTRIB, 5=VERB parser.add_argument('--n_classes' type=int default=6)# #label classes = len({0, 1, 2,3, 4, 5}) parser.add_argument('--batch_size_per_gpu' type=int default=128)# 9.6Gb on TitanX parser.add_argument('--n_epoch' type=int default=4)<line_sep>parser.add_argument("--tz" type=str default='Asia/Singapore' help="Timezone for local finish time estimation")<line_sep>parser.add_argument('--dep_fac' type=float default=5.0)<line_sep>parser.add_argument('--extra_blocks' type=int default=1)<line_sep>parser.add_argument('--predict' action='store_true')<line_sep>args=parser.parse_args()<line_sep>print(args)<line_sep>random.seed(args.seed)<line_sep>np.random.seed(args.seed)<line_sep>torch.manual_seed(args.seed)<line_sep>torch.cuda.manual_seed_all(args.seed)<line_sep>tz=pytz.timezone(args.tz)<line_sep>device=torch.device("cuda"<if>torch.cuda.is_available()<else>"cpu")<line_sep>n_gpu=torch.cuda.device_count()<line_sep>print("device" device "n_gpu" n_gpu)<line_sep>token_clf=args.token_clf<line_sep>relation_hdf5=os.path.join(args.path args.relation_hdf5)<line_sep>train_dataset=Hdf5Dataset(h5_path=relation_hdf5 vocab_count=args.vocab_count)<line_sep>train_size=len(train_dataset)<line_sep>n_ctx=train_dataset.n_ctx<line_sep>batch_size=args.batch_size_per_gpu<line_sep>n_gpus=torch.cuda.device_count()<if_stmt>n_gpus<g>1# https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html <block_start>batch_size<augmul>n_gpus<block_end>n_updates_total=(train_size<floordiv>batch_size)<times>args.n_epoch<line_sep>model_stepwise=StepwiseClassifierModel(args n_classifier=args.n_classes n_ctx=n_ctx vocab_count=args.vocab_count extra_blocks=args.extra_blocks)<line_sep>model_opt=OpenAIAdam(model_stepwise.parameters() lr=args.lr schedule=args.lr_schedule warmup=args.lr_warmup t_total=n_updates_total b1=args.b1 b2=args.b2 e=args.e l2=args.l2 vector_l2=args.vector_l2 max_grad_norm=args.max_grad_norm)<line_sep>epoch_start,epoch_max,loss_best=-1 args.n_epoch 
<none><if_stmt>args.checkpoint<is><none><block_start>load_openai_pretrained_model(model_stepwise.transformer n_special=args.tokens_special n_ctx=n_ctx # n_ctx adjusts embedding size to include positional path=pretrained_model_path+'/' path_names=os.path.join('.' 'orig' 'pytorch-openai-transformer-lm')+'/' )<block_end>model_stepwise.to(device)<if_stmt>torch.cuda.device_count()<g>1# https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html <block_start>print("Let's use" torch.cuda.device_count() "GPUs!")<line_sep>model_stepwise=nn.DataParallel(model_stepwise)<block_end>os.makedirs('./checkpoints' exist_ok=<true>)<if_stmt>args.checkpoint<is><not><none><block_start>checkpoint=torch.load(args.checkpoint map_location=<lambda>storage loc:storage)<line_sep>epoch_start=checkpoint['epoch']<line_sep>#from collections import OrderedDict #def fix_dict(state_dict): # new_state_dict = OrderedDict() # for k, v in state_dict.items(): # name = k # if name.startswith('module.'): # name = k[7:] # remove 'module.' of dataparallel # new_state_dict[name]=v # return new_state_dict # #model.load_state_dict(new_state_dict) model_stepwise.load_state_dict(checkpoint['model'])<line_sep>model_opt.load_state_dict(checkpoint['optimizer'])<line_sep>#lr_scheduler = get_lr_scheduler(optimizer) #lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) print("Loaded %s - assuming epoch_now=%d"%(args.checkpoint epoch_start ))<block_end><if_stmt>args.predict# Predict out results for all the 'relation_hdf5' instead (batch_size=1 not efficient, but 'sure') #test_loader = DataLoader(dataset=train_dataset, batch_size=1, shuffle=False) # , num_workers=1 # Predict out results for all the 'relation_hdf5' instead <block_start>test_loader=DataLoader(dataset=train_dataset batch_size=batch_size shuffle=<false>)# , num_workers=1 run_predictions(test_loader=test_loader output_file="%s_%s.npz"%(relation_hdf5 args.stub))<line_sep>#run_predictions(test_loader=test_loader, output_file="%s_%s.conll" % (relation_hdf5, args.stub)) exit(0)<block_end>train_loader=DataLoader(dataset=train_dataset batch_size=batch_size shuffle=<false>)<line_sep># 2 leads to device side asserts... 
, num_workers=1 <try_stmt><block_start>idx_loss_check,loss_recent_tot=0 0.<for_stmt>epoch range(epoch_start+1 epoch_max)# So this refers to the epoch-end value <block_start>time_estimate_last=t_start=time.time()<line_sep>model_stepwise.train()<for_stmt>idx,(features labels deps) enumerate(train_loader)<block_start>features,labels,deps=features.to(device) labels.to(device) deps.to(device)<line_sep>model_opt.zero_grad()<line_sep>out_class_logits,out_deps_logits=model_stepwise(features)<line_sep>#batch_loss = ce_loss(output, target) # https://pytorch.org/docs/stable/nn.html?highlight=loss#torch.nn.BCEWithLogitsLoss class_loss=nn.CrossEntropyLoss(reduction='none')(out_class_logits labels)<line_sep>#print("class_loss.size()=", class_loss.size()) # class_loss.size()= torch.Size([8, 128]) class_loss_tot=class_loss.sum()<line_sep># The dep loss should be ignored for those deps with class==0 dep_loss=nn.CrossEntropyLoss(reduction='none')(out_deps_logits deps)<line_sep>#print("dep_loss.size()=", dep_loss.size()) # dep_loss.size()= torch.Size([8, 128]) #dep_loss_masked = torch.where(labels>0, dep_loss, zero) # This zeros out all positions where labels == 0 #dep_loss_tot = dep_loss_masked.sum() / batch_size dep_loss_tot=dep_loss.masked_fill_(labels<eq>0 0.).sum()<line_sep>factor_hints="Factor hints (class_loss=%8.4f, deps_loss=%10.4f, fac=%.8f)"%(class_loss_tot.item()/batch_size<times>100. dep_loss_tot.item()/batch_size<times>100. class_loss_tot.item()/dep_loss_tot.item() )<line_sep>#factor hints : (231.14927673339844, 225.23297119140625, 1.0262674932124587) batch_loss=class_loss_tot+args.dep_fac<times>dep_loss_tot<line_sep>batch_loss.backward()<line_sep>model_opt.step()<line_sep>loss_this=batch_loss.item()<line_sep>loss_recent_tot<augadd>loss_this<if_stmt>idx%10<eq>0<block_start>print('%.1f%% of epoch %d'%(idx/float(len(train_loader))<times>100 epoch ) end='\r')<block_end># Python 3 FTW! <if_stmt>idx%100<eq>0<block_start>print(epoch idx factor_hints)<block_end>sentences_since_last_check=(idx-idx_loss_check)<times>batch_size<line_sep>#if sentences_since_last_check > 50000: # Potentially save every 50000 sentences (~30mins on TitanX) <if_stmt>sentences_since_last_check<g>200000# Potentially save every 200000 sentences (~2hrs on TitanX) <block_start>loss_recent=loss_recent_tot/float(sentences_since_last_check)# loss per sentence <if_stmt>loss_best<is><none><or>loss_recent<l>loss_best# Save model if loss has decreased <block_start>fname='./checkpoints/model-grapher_%s_%02d-%07d.pth'%(args.stub epoch idx<times>batch_size )<line_sep>print("Saving Checkpoint : '%s', loss_recent=%.4f"%(fname loss_recent/batch_size<times>100. ))<line_sep>torch.save(dict(epoch=epoch model=model_stepwise.state_dict() optimizer=model_opt.state_dict() #lr_scheduler=lr_scheduler.state_dict(), ) fname)<line_sep>loss_best=loss_recent<line_sep>idx_loss_check,loss_recent_tot=idx 0.<block_end><block_end># Restart running tallies t_now=time.time()<if_stmt>t_now-time_estimate_last<g>5<times>60.# Update every 5 minutes <block_start>calc_duration=t_now-t_start<line_sep>calc_fraction=(idx<times>batch_size)/len(train_dataset)<line_sep>epoch_duration=calc_duration/calc_fraction<line_sep>epoch_max_secs=(epoch_max-(epoch+calc_fraction))<times>epoch_duration<line_sep>epoch_max_end=epoch_max_secs+time.time()# This is since the epoch in seconds print("Time used for %.2f of epoch %d: %.1f seconds"%(calc_fraction epoch calc_duration ))<line_sep>print(" Time per 1000 lines : %.3f seconds"%(epoch_duration/len(train_dataset)<times>1000. 
))<line_sep>print(" Expected finish in : %.2f hours"%(epoch_max_secs/60/60 ))<line_sep>#print(" Expected finish time : %s (server)" % ( datetime.fromtimestamp(epoch_max_end).strftime("%A, %B %d, %Y %H:%M:%S %Z%z"), )) print(" Expected finish time : %s (%s)"%(datetime.fromtimestamp(epoch_max_end timezone.utc).astimezone(tz=tz).strftime("%A, %B %d, %Y %H:%M:%S %Z%z") args.tz ))<line_sep>time_estimate_last=time.time()<block_end><block_end># Keep track of estimate times idx_loss_check<augsub>len(train_dataset)/batch_size# Keep track of reset idxs # End-of-epoch saving fname='./checkpoints/model-grapher_%s_%02d-%07d_end-epoch.pth'%(args.stub epoch idx<times>batch_size )<line_sep>print("Saving End-epoch checkpoint : '%s'"%(fname ))<line_sep>torch.save(dict(epoch=epoch model=model_stepwise.state_dict() optimizer=model_opt.state_dict() ) fname)<block_end><block_end><except_stmt>KeyboardInterrupt<block_start>print("Interrupted. Releasing resources...")<block_end><finally_stmt><block_start>train_dataset.close()<block_end>exit(0)<block_end>
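# --- Typical invocations (editor's note; the script's filename is not given above, so
# `train_grapher.py` is a placeholder, and the HDF5 file must come from relation_split_to_hdf5) ---
#   python train_grapher.py --stub run1 --relation_hdf5 coco_train.conll_v32.hdf5
#   python train_grapher.py --predict --stub run1 --checkpoint checkpoints/<saved-model>.pth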
# # chmod this file securely and be sure to remove the default users # users={"frodo":"1ring" "yossarian":"catch22" "ayla":"jondalar" }<line_sep>
""" Text GAN Adverserial networks applied to language models using Gumbel Softmax. Can be used as pure language model. """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>functools<import_from_stmt>collections namedtuple<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.contrib.framework.python.ops.variables get_or_create_global_step<line_sep># -- local imports <import_from_stmt>data_loader get_corpus_size build_vocab preprocess get_input_queues<import_stmt>layers<as>lay<import_from_stmt>decoders gumbel_decoder_fn<line_sep>GENERATOR_PREFIX="generator"<line_sep>DISCRIMINATOR_PREFIX="discriminator"<line_sep>GeneratorTuple=namedtuple("Generator" ["rnn_outputs" "flat_logits" "probs" "loss" "embedding_matrix" "output_projections"])<line_sep>DiscriminatorTuple=namedtuple("Discriminator" ["rnn_final_state" "prediction_logits" "loss"])<line_sep># TODO: separate the variables for generator and discriminators <class_stmt>Model<block_start><def_stmt>__init__ self corpus **opts<block_start>self.corpus=corpus<line_sep>self.opts=opts<line_sep>self.global_step=get_or_create_global_step()<line_sep>self.increment_global_step_op=tf.assign(self.global_step self.global_step+1 name="increment_global_step")<line_sep>self.corpus_size=get_corpus_size(self.corpus["train"])<line_sep>self.corpus_size_valid=get_corpus_size(self.corpus["valid"])<line_sep>self.word2idx,self.idx2word=build_vocab(self.corpus["train"])<line_sep>self.vocab_size=len(self.word2idx)<line_sep>self.generator_template=tf.make_template(GENERATOR_PREFIX generator)<line_sep>self.discriminator_template=tf.make_template(DISCRIMINATOR_PREFIX discriminator)<line_sep>self.enqueue_data,_,source,target,sequence_length=prepare_data(self.corpus["train"] self.word2idx num_threads=7 **self.opts)<line_sep># TODO: option to either do pretrain or just generate? 
self.g_tensors_pretrain=self.generator_template(source target sequence_length self.vocab_size **self.opts)<line_sep>self.enqueue_data_valid,self.input_ph,source_valid,target_valid,sequence_length_valid=prepare_data(self.corpus["valid"] self.word2idx num_threads=1 **self.opts)<line_sep>self.g_tensors_pretrain_valid=self.generator_template(source_valid target_valid sequence_length_valid self.vocab_size **self.opts)<line_sep>self.decoder_fn=prepare_custom_decoder(sequence_length self.g_tensors_pretrain.embedding_matrix self.g_tensors_pretrain.output_projections)<line_sep>self.g_tensors_fake=self.generator_template(source target sequence_length self.vocab_size decoder_fn=self.decoder_fn **self.opts)<line_sep>self.g_tensors_fake_valid=self.generator_template(source_valid target_valid sequence_length_valid self.vocab_size decoder_fn=self.decoder_fn **self.opts)<line_sep># TODO: using the rnn outputs from pretraining as "real" instead of target embeddings (aka professor forcing) self.d_tensors_real=self.discriminator_template(self.g_tensors_pretrain.rnn_outputs sequence_length is_real=<true> **self.opts)<line_sep># TODO: check to see if sequence_length is correct self.d_tensors_fake=self.discriminator_template(self.g_tensors_fake.rnn_outputs <none> is_real=<false> **self.opts)<line_sep>self.g_tvars=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES scope=GENERATOR_PREFIX)<line_sep>self.d_tvars=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES scope=DISCRIMINATOR_PREFIX)<block_end><block_end><def_stmt>prepare_data path word2idx num_threads=8 **opts<block_start><with_stmt>tf.device("/cpu:0")<block_start>enqueue_data,dequeue_batch=get_input_queues(path word2idx batch_size=opts["batch_size"] num_threads=num_threads)<line_sep># TODO: put this logic somewhere else input_ph=tf.placeholder_with_default(dequeue_batch (<none> <none>))<line_sep>source,target,sequence_length=preprocess(input_ph)<block_end><return>enqueue_data input_ph source target sequence_length<block_end><def_stmt>prepare_custom_decoder sequence_length embedding_matrix output_projections# TODO: this is brittle, global variables <block_start>cell=tf.get_collection("rnn_cell")[0]<line_sep>encoder_state=cell.zero_state(tf.shape(sequence_length)[0] tf.float32)<line_sep># embedding_matrix = tf.get_collection("embedding_matrix")[0] # output_projections = tf.get_collection("output_projections")[:2] # TODO: repeated output_projections maximum_length=tf.reduce_max(sequence_length)+3<line_sep>decoder_fn=gumbel_decoder_fn(encoder_state embedding_matrix output_projections maximum_length)<line_sep><return>decoder_fn<block_end><def_stmt>generator source target sequence_length vocab_size decoder_fn=<none> **opts<block_start>""" Args: source: TensorFlow queue or placeholder tensor for word ids for source target: TensorFlow queue or placeholder tensor for word ids for target sequence_length: TensorFlow queue or placeholder tensor for number of word ids for each sentence vocab_size: max vocab size determined from data decoder_fn: if using custom decoder_fn else use the default dynamic_rnn """<line_sep>tf.logging.info(" Setting up generator")<line_sep>embedding_layer=lay.embedding_layer(vocab_size opts["embedding_dim"] name="embedding_matrix")<line_sep># TODO: add batch norm? 
rnn_outputs=(source<rshift>embedding_layer<rshift>lay.word_dropout_layer(keep_prob=opts["word_dropout_keep_prob"])<rshift>lay.recurrent_layer(hidden_dims=opts["rnn_hidden_dim"] keep_prob=opts["recurrent_dropout_keep_prob"] sequence_length=sequence_length decoder_fn=decoder_fn name="rnn_cell"))<line_sep>output_projection_layer=lay.dense_layer(hidden_dims=vocab_size name="output_projections")<line_sep>flat_logits=(rnn_outputs<rshift>lay.reshape_layer(shape=(-1 opts["rnn_hidden_dim"]))<rshift>output_projection_layer)<line_sep>probs=flat_logits<rshift>lay.softmax_layer()<line_sep>embedding_matrix=embedding_layer.get_variables_in_scope()<line_sep>output_projections=output_projection_layer.get_variables_in_scope()<if_stmt>decoder_fn<is><not><none><block_start><return>GeneratorTuple(rnn_outputs=rnn_outputs flat_logits=flat_logits probs=probs loss=<none> embedding_matrix=embedding_matrix[0] output_projections=output_projections)<block_end>loss=(flat_logits<rshift>lay.cross_entropy_layer(target=target)<rshift>lay.reshape_layer(shape=tf.shape(target))<rshift>lay.mean_loss_by_example_layer(sequence_length=sequence_length))<line_sep># TODO: add dropout penalty <return>GeneratorTuple(rnn_outputs=rnn_outputs flat_logits=flat_logits probs=probs loss=loss embedding_matrix=embedding_matrix[0] output_projections=output_projections)<block_end><def_stmt>discriminator input_vectors sequence_length is_real=<true> **opts<block_start>""" Args: input_vectors: output of the RNN either from real or generated data sequence_length: TensorFlow queue or placeholder tensor for number of word ids for each sentence is_real: if True, RNN outputs when feeding in actual data, if False feeds in generated data """<line_sep>tf.logging.info(" Setting up discriminator")<line_sep>rnn_final_state=(input_vectors<rshift>lay.dense_layer(hidden_dims=opts["embedding_dim"])<rshift>lay.recurrent_layer(sequence_length=sequence_length hidden_dims=opts["rnn_hidden_dim"] return_final_state=<true>))<line_sep>prediction_logits=(rnn_final_state<rshift>lay.dense_layer(hidden_dims=opts["output_hidden_dim"])<rshift>lay.relu_layer()<rshift>lay.dropout_layer(opts["output_dropout_keep_prob"])<rshift>lay.dense_layer(hidden_dims=opts["output_hidden_dim"])<rshift>lay.relu_layer()<rshift>lay.dropout_layer(opts["output_dropout_keep_prob"])<rshift>lay.dense_layer(hidden_dims=1))<if_stmt>is_real<block_start>target=tf.ones_like(prediction_logits)<block_end><else_stmt><block_start>target=tf.zeros_like(prediction_logits)<block_end># TODO: add accuracy loss=(prediction_logits<rshift>lay.sigmoid_cross_entropy_layer(target=target))<line_sep># TODO: return logits in case for WGAN and l2 GANs <return>DiscriminatorTuple(rnn_final_state=rnn_final_state prediction_logits=prediction_logits loss=loss)<block_end><if_stmt>__name__<eq>"__main__"<block_start><import_from_stmt>data_loader DATA_PATH<import_from_stmt>train opts<line_sep>corpus=DATA_PATH["ptb"]<line_sep>model=Model(corpus **opts)<block_end>
""" Copyright (c) 2020 COTOBA DESIGN, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """<import_stmt>unittest<import_stmt>os<import_stmt>os.path<import_from_stmt>programy.rdf.collection RDFCollection<import_from_stmt>programy.storage.entities.store Store<class_stmt>RDFStoreAsserts(unittest.TestCase)<block_start><def_stmt>assert_rdf_storage self store<block_start>store.empty()<line_sep>store.add_rdf("ACTIVITY" "ACT" "hasPurpose" "to entertain by performing")<line_sep>store.add_rdf("ACTIVITY" "ACT" "hasSize" "0")<line_sep>store.add_rdf("ACTIVITY" "ACT" "hasSyllables" "1")<line_sep>store.add_rdf("ACTIVITY" "ACT" "isa" "Activity0")<line_sep>store.add_rdf("ACTIVITY" "ACT" "lifeArea" "Recreation")<line_sep>store.commit()<line_sep>rdf_collection=RDFCollection()<line_sep>store.load(rdf_collection "ACTIVITY")<line_sep>self.assertTrue(rdf_collection.contains("ACTIVITY"))<line_sep>self.assertTrue(rdf_collection.has_subject('ACT'))<line_sep>self.assertTrue(rdf_collection.has_predicate('ACT' "hasPurpose"))<line_sep>self.assertTrue(rdf_collection.has_object('ACT' "hasPurpose" "to entertain by performing"))<block_end><def_stmt>assert_upload_from_text self store<block_start>store.empty()<line_sep>store.upload_from_text("ACTIVITY" """ ACT:hasPurpose:to entertain by performing ACT:hasSize:0 ACT:hasSyllables:1 ACT:isa:Activity ACT:lifeArea:Recreation ADVENTURE:hasPurpose:to provide new experience ADVENTURE:hasSize:0 ADVENTURE:hasSyllables:3 ADVENTURE:isa:Activity ADVENTURE:lifeArea:Recreation FISHING:hasPurpose:to hunt for fish FISHING:hasSize:0 FISHING:hasSyllables:2 FISHING:isa:Activity FISHING:lifeArea:Recreation """)<line_sep>rdf_collection=RDFCollection()<line_sep>store.load(rdf_collection "ACTIVITY")<line_sep>self.assertTrue(rdf_collection.contains("ACTIVITY"))<line_sep>self.assertTrue(rdf_collection.has_subject('ACT'))<line_sep>self.assertTrue(rdf_collection.has_predicate('ACT' "hasPurpose"))<line_sep>self.assertTrue(rdf_collection.has_object('ACT' "hasPurpose" "to entertain by performing"))<block_end><def_stmt>assert_upload_from_text_file self store<block_start>store.empty()<line_sep>store.upload_from_file(os.path.dirname(__file__)+os.sep+"data"+os.sep+"rdfs"+os.sep+"text"+os.sep+"activity.rdf")<line_sep>rdf_collection=RDFCollection()<line_sep>store.load(rdf_collection "ACTIVITY")<line_sep>self.assertTrue(rdf_collection.contains("ACTIVITY"))<line_sep>self.assertTrue(rdf_collection.has_subject('ACT'))<line_sep>self.assertTrue(rdf_collection.has_predicate('ACT' "hasPurpose"))<line_sep>self.assertTrue(rdf_collection.has_object('ACT' 
"hasPurpose" "to entertain by performing"))<block_end><def_stmt>assert_upload_text_files_from_directory_no_subdir self store<block_start>store.empty()<line_sep>store.upload_from_directory(os.path.dirname(__file__)+os.sep+"data"+os.sep+"rdfs"+os.sep+"text" subdir=<false>)<line_sep>rdf_collection=RDFCollection()<line_sep>store.load(rdf_collection "ACTIVITY")<line_sep>self.assertTrue(rdf_collection.contains("ACTIVITY"))<line_sep>self.assertTrue(rdf_collection.has_subject('ACT'))<line_sep>self.assertTrue(rdf_collection.has_predicate('ACT' "hasPurpose"))<line_sep>self.assertTrue(rdf_collection.has_object('ACT' "hasPurpose" "to entertain by performing"))<block_end><def_stmt>assert_upload_from_csv_file self store<block_start>store.empty()<line_sep>store.upload_from_file(os.path.dirname(__file__)+os.sep+"data"+os.sep+"rdfs"+os.sep+"csv"+os.sep+"activity.csv" format=Store.CSV_FORMAT)<line_sep>rdf_collection=RDFCollection()<line_sep>store.load(rdf_collection "ACTIVITY")<line_sep>self.assertTrue(rdf_collection.contains("ACTIVITY"))<line_sep>self.assertTrue(rdf_collection.has_subject('ACT'))<line_sep>self.assertTrue(rdf_collection.has_predicate('ACT' "hasPurpose"))<line_sep>self.assertTrue(rdf_collection.has_object('ACT' "hasPurpose" "to entertain by performing"))<block_end><def_stmt>assert_upload_csv_files_from_directory_with_subdir self store<block_start>store.empty()<line_sep>store.upload_from_directory(os.path.dirname(__file__)+os.sep+"data"+os.sep+"rdfs"+os.sep+"csv" subdir=<true> format=Store.CSV_FORMAT)<line_sep>rdf_collection=RDFCollection()<line_sep>store.load_all(rdf_collection)<line_sep>self.assertTrue(rdf_collection.contains("ACTIVITY"))<line_sep>self.assertTrue(rdf_collection.has_subject('ACT'))<line_sep>self.assertTrue(rdf_collection.has_predicate('ACT' "hasPurpose"))<line_sep>self.assertTrue(rdf_collection.has_object('ACT' "hasPurpose" "to entertain by performing"))<line_sep>self.assertTrue(rdf_collection.contains("ANIMAL"))<line_sep>self.assertTrue(rdf_collection.has_subject('ANT'))<line_sep>self.assertTrue(rdf_collection.has_predicate('ANT' "hasPurpose"))<line_sep>self.assertTrue(rdf_collection.has_object('ANT' "hasPurpose" "to make anthills"))<block_end><block_end>
# pylint: skip-file # pylint: disable=too-many-instance-attributes <class_stmt>VMInstance(GCPResource)<block_start>'''Object to represent a gcp instance'''<line_sep>resource_type="compute.v1.instance"<line_sep># pylint: disable=too-many-arguments <def_stmt>__init__ self rname project zone machine_type metadata tags disks network_interfaces service_accounts=<none> <block_start>'''constructor for gcp resource'''<line_sep>super(VMInstance self).__init__(rname VMInstance.resource_type project zone)<line_sep>self._machine_type=machine_type<line_sep>self._service_accounts=service_accounts<line_sep>self._machine_type_url=<none><line_sep>self._tags=tags<line_sep>self._metadata=[]<if_stmt>metadata<and>isinstance(metadata dict)<block_start>self._metadata={'items':[{'key':key 'value':value}<for>key,value metadata.items()]}<block_end><elif_stmt>metadata<and>isinstance(metadata list)<block_start>self._metadata=[{'key':label['key'] 'value':label['value']}<for>label metadata]<block_end>self._disks=disks<line_sep>self._network_interfaces=network_interfaces<line_sep>self._properties=<none><block_end>@property<def_stmt>service_accounts self<block_start>'''property for resource service accounts '''<line_sep><return>self._service_accounts<block_end>@property<def_stmt>network_interfaces self<block_start>'''property for resource machine network_interfaces '''<line_sep><return>self._network_interfaces<block_end>@property<def_stmt>machine_type self<block_start>'''property for resource machine type '''<line_sep><return>self._machine_type<block_end>@property<def_stmt>machine_type_url self<block_start>'''property for resource machine type url'''<if_stmt>self._machine_type_url<eq><none><block_start>self._machine_type_url=Utils.zonal_compute_url(self.project self.zone 'machineTypes' self.machine_type)<block_end><return>self._machine_type_url<block_end>@property<def_stmt>tags self<block_start>'''property for resource tags '''<line_sep><return>self._tags<block_end>@property<def_stmt>metadata self<block_start>'''property for resource metadata'''<line_sep><return>self._metadata<block_end>@property<def_stmt>disks self<block_start>'''property for resource disks'''<line_sep><return>self._disks<block_end>@property<def_stmt>properties self<block_start>'''property for holding the properties'''<if_stmt>self._properties<eq><none><block_start>self._properties={'zone':self.zone 'machineType':self.machine_type_url 'metadata':self.metadata 'tags':self.tags 'disks':self.disks 'networkInterfaces':self.network_interfaces }<if_stmt>self.service_accounts<block_start>self._properties['serviceAccounts']=self.service_accounts<block_end><block_end><return>self._properties<block_end><def_stmt>to_resource self<block_start>'''return the resource representation'''<line_sep><return>{'name':self.name 'type':VMInstance.resource_type 'properties':self.properties }<block_end><block_end>
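# --- Illustrative construction (editor's addition): GCPResource and Utils are defined elsewhere
# in this repo, and the disk / network dicts below are simplified stand-ins for real
# deployment-manager properties. ---
vm = VMInstance(
    rname='demo-vm',
    project='my-project',
    zone='us-central1-a',
    machine_type='n1-standard-1',
    metadata={'env': 'dev'},
    tags={'items': ['http-server']},
    disks=[{'boot': True, 'autoDelete': True}],
    network_interfaces=[{'network': 'global/networks/default'}],
)
print(vm.to_resource())  # {'name': 'demo-vm', 'type': 'compute.v1.instance', 'properties': {...}}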
# -*- coding: utf-8 -*- """ Created on Sun Jul 21 15:02:36 2019 Bresenham circle-drawing algorithm implementation Blog tutorial: https://blog.csdn.net/varyshare/article/details/96724103 @author: 知乎@Ai酱 """<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<line_sep>img=np.zeros((105 105))# create a 105x105 canvas count=0<def_stmt>draw x y<block_start>""" Draw the point (x,y). Note: (x,y) has to be transformed into array coordinates (computer-graphics coordinates), because the array origin (0,0) is the top-left corner while the original coordinate system has (0,0) at the centre, and array row indices increase downwards. """<line_sep>img[-y+int(img.shape[0]/2) x+int(img.shape[1]/2)]=1<block_end><pass><line_sep>r_pixel=50# circle radius, in pixels # initialisation: draw the first point, starting from the rightmost point on the horizontal axis (x y)=(r_pixel 0)<line_sep>""" By definition P_k=d1+d2 where d1 = distance of the 1st candidate next point from the arc (negative) d2 = distance of the 2nd candidate next point from the arc (positive); for efficiency, P_{k+1}=P_k + an increment is usually computed recursively """<line_sep>P_k=-2<times>r_pixel+3<line_sep># iterate to draw 1/8 of the arc <while_stmt>x<ge>y# there are two candidate next points; which one is chosen depends on P_k>0 or <0 <block_start><if_stmt>P_k<ge>0# the outer candidate point deviates further from the arc <block_start>P_k_next=P_k-4<times>x+4<times>y+10<line_sep>(x_next y_next)=(x-1 y+1)<block_end><else_stmt># the inner candidate point deviates further from the arc <block_start>P_k_next=P_k+4<times>y+6<line_sep>(x_next y_next)=(x y+1)<block_end># draw the other octants by symmetry draw(x y)<line_sep>draw(-x y)<line_sep>draw(x -y)<line_sep>draw(-x -y)<line_sep>draw(y x)<line_sep>draw(y -x)<line_sep>draw(-y x)<line_sep>draw(-y -x)<line_sep># update the coordinates and P_k (x y)=(int(x_next) int(y_next))<line_sep>P_k=P_k_next<block_end><pass><line_sep># display the image plt.imshow(img)<line_sep>
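# --- Sanity check (editor's addition): every lit pixel should sit within about half a pixel of
# the ideal circle of radius r_pixel around the canvas centre; plt.show() is needed to actually
# display the canvas when running as a plain script. ---
rows, cols = np.nonzero(img)
cx = int(img.shape[1] / 2)
cy = int(img.shape[0] / 2)
radii = np.sqrt((cols - cx) ** 2 + (rows - cy) ** 2)
print("radius range of drawn pixels:", radii.min(), radii.max())  # both close to 50
plt.show()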
"""Manages GO Term fill colors and bordercolors."""<line_sep>__copyright__="Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved."<line_sep>__author__="<NAME>"<import_stmt>sys<import_stmt>collections<as>cx<class_stmt>GoeaResults(object)<block_start>"""Manages GOEA Results for plotting."""<line_sep>kws_set=set(['id2symbol' 'study_items' 'items_p_line ' 'pval_name'])<line_sep>dflt_items_p_line=5# study items (e.g., genes) per line on GO Terms fmtres="{study_count} genes"<line_sep>alpha2col=cx.OrderedDict([# Enriched GOEA GO terms that are significant (0.005 'mistyrose') (0.010 'moccasin') (0.050 'lemonchiffon1') # GOEA GO terms that are not significant (1.000 'grey95') ])<def_stmt>__init__ self goea_results **kws# kws: goea_results or go2nt <block_start><assert_stmt>goea_results "NO GOEA RESULTS IN GoeaResults INPUTS"<line_sep># GOATOOLs results as objects (WAS: Kws goea_results go2nt) self.go2res={r.GO:r<for>r goea_results}<line_sep>self.is_goterm=hasattr(goea_results[0] "_fldsdefprt")<line_sep># GOATOOLs results as a list of namedtuples self.pval_name=self._init_pval_name(**kws)<line_sep>self.study_items=kws.get('study_items' <none>)<line_sep>self.study_items_max=self._init_study_items_max()<line_sep>self.items_p_line=kws['items_p_line']<if>'items_p_line'<in>kws<else>self.dflt_items_p_line<line_sep>self.id2symbol=kws['id2symbol']<if>'id2symbol'<in>kws<else>{}<block_end><def_stmt>prt_summary self prt=sys.stdout<block_start>"""Print summary of GOEA plotting object."""<line_sep>desc="NtGoeaResults"<if>self.is_goterm<else>"namedtuple"<line_sep>prt.write("{N} GOEA results from {O}. P-values stored in {P}.\n".format(N=len(self.go2res) O=desc P=self.pval_name))<block_end><def_stmt>get_study_txt self goid<block_start>"""Get GO text from GOEA study."""<if_stmt>goid<in>self.go2res<block_start>res=self.go2res[goid]<if_stmt>res.study_items<is><not><none><block_start><return>self._get_item_str(res)<block_end><else_stmt><block_start><return>self.fmtres.format(study_count=res.study_count)<block_end><block_end><block_end><def_stmt>set_goid2color_pval self goid2color<block_start>"""Fill missing colors based on p-value of an enriched GO term."""<line_sep>alpha2col=self.alpha2col<if_stmt>self.pval_name<is><not><none><block_start>pval_name=self.pval_name<for_stmt>goid,res self.go2res.items()<block_start>pval=getattr(res pval_name <none>)<if_stmt>pval<is><not><none><block_start><for_stmt>alpha,color alpha2col.items()<block_start><if_stmt>pval<le>alpha<and>res.study_count<ne>0<block_start><if_stmt>goid<not><in>goid2color<block_start>goid2color[goid]=color<block_end><block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>get_goid2color_pval self<block_start>"""Return a go2color dict containing GO colors determined by P-value."""<line_sep>go2color={}<line_sep>self.set_goid2color_pval(go2color)<line_sep>color_dflt=self.alpha2col[1.000]<for_stmt>goid self.go2res<block_start><if_stmt>goid<not><in>go2color<block_start>go2color[goid]=color_dflt<block_end><block_end><return>go2color<block_end><def_stmt>_get_item_str self res<block_start>"""Return genes in any of these formats: 1. 19264, 17319, 12520, 12043, 74131, 22163, 12575 2. Ptprc, Mif, Cd81, Bcl2, Sash3, Tnfrsf4, Cdkn1a 3. 7: Ptprc, Mif, Cd81, Bcl2, Sash3... 
"""<line_sep>ipl=self.items_p_line<line_sep>prt_items=sorted([self._get_genestr(itemid)<for>itemid res.study_items])<line_sep>prt_multiline=[prt_items[i:i+ipl]<for>i range(0 len(prt_items) ipl)]<line_sep>num_items=len(prt_items)<if_stmt>self.study_items_max<is><none><block_start>genestr="\n".join([", ".join(str(e)<for>e sublist)<for>sublist prt_multiline])<line_sep><return>"{N}) {GENES}".format(N=num_items GENES=genestr)<block_end><else_stmt><block_start><if_stmt>num_items<le>self.study_items_max<block_start>gene_lines=[", ".join(str(e)<for>e sublist)<for>sublist prt_multiline]<line_sep>genestr="\n".join(gene_lines)<line_sep><return>genestr<block_end><else_stmt><block_start>short_list=prt_items[:self.study_items_max]<line_sep>short_mult=[short_list[i:i+ipl]<for>i range(0 len(short_list) ipl)]<line_sep>short_lines=[", ".join(str(e)<for>e sublist)<for>sublist short_mult]<line_sep>short_str="\n".join(short_lines)<line_sep><return>"".join(["{N} genes; ".format(N=num_items) short_str "..."])<block_end><block_end><block_end><def_stmt>_get_genestr self itemid<block_start>"""Given a geneid, return the string geneid or a gene symbol."""<if_stmt>itemid<in>self.id2symbol<block_start>symbol=self.id2symbol[itemid]<if_stmt>symbol<is><not><none><block_start><return>symbol<block_end><block_end><if_stmt>isinstance(itemid int)<block_start><return>str(itemid)<block_end><return>itemid<block_end><def_stmt>_init_pval_name self **kws<block_start>"""Initialize pvalue attribute name."""<if_stmt>'pval_name'<in>kws<block_start><return>kws['pval_name']<block_end># If go2res contains GO Terms <if_stmt>self.is_goterm<block_start><return>"p_{M}".format(M=next(iter(self.go2res.values())).get_method_name())<block_end># If go2res contains GO namedtuples <for_stmt>fld next(iter(self.go2res.values()))._fields<block_start><if_stmt>fld[:2]<eq>'p_'<and>fld<ne>'p_uncorrected'<block_start><return>fld<block_end><block_end><block_end><def_stmt>_init_study_items_max self<block_start>"""User can limit the number of genes printed in a GO term."""<if_stmt>self.study_items<is><none><block_start><return><none><block_end><if_stmt>self.study_items<is><true><block_start><return><none><block_end><if_stmt>isinstance(self.study_items int)<block_start><return>self.study_items<block_end><return><none><block_end><block_end># Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved.
<import_from_future_stmt> print_function<import_stmt>logrun<import_stmt>deepstate_base<class_stmt>BoringDisabledTest(deepstate_base.DeepStateTestCase)<block_start><def_stmt>run_deepstate self deepstate<block_start>(r output)=logrun.logrun([deepstate "build/examples/BoringDisabled"] "deepstate.out" 1800)<line_sep>self.assertEqual(r 0)<line_sep>self.assertTrue("Passed: CharTest_BoringVerifyCheck"<in>output)<line_sep>self.assertTrue("Failed: CharTest_VerifyCheck"<in>output)<block_end><block_end>
# %% [markdown] # ## Determining the best naive predictor for the f1 score # - If there are 2 classes that are skewed, then the most common value is often slightly better than the random guess # - If there are 4 classes that are skewed, then the random value is often slightly better than the most common value # - If the classes (2 or 4) are balanced, then the random guess is usually significantly better than the most common value. # # Summing up, random values are usually preferred over the most common value. # # However, the best baseline is the maximum of the f1_score of the most common value and random values. # %% <import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>sklearn.metrics f1_score<line_sep># %% df=pd.DataFrame({"boolean_equal":np.random.choice(["yes" "no"] 1000) "boolean_skewed":np.random.choice(["yes" "yes" "yes" "no"] 1000) "multicat_equal":np.random.choice(["cat1" "cat2" "cat3" "cat4"] 1000) "multicat_skewed":np.random.choice(["cat1" "cat1" "cat1" "cat1" "cat2" "cat2" "cat3" "cat4"] 1000) })<line_sep># %% <def_stmt>f1_score_most_common series value<block_start><return>f1_score(series np.random.choice([value] 1000) average="weighted")<block_end># %% <def_stmt>f1_score_random series<block_start><return>f1_score(series series.sample(frac=1) average="weighted")<block_end># %% [markdown] # ### Boolean equal # - Random is better than most common # %% f1_score_most_common(df["boolean_equal"] "yes")<line_sep># %% f1_score_random(df["boolean_equal"])<line_sep># %% [markdown] # ### Boolean skewed # - Most common is usually better than random but they are in the same ball park # %% f1_score_most_common(df["boolean_skewed"] "yes")<line_sep># %% f1_score_random(df["boolean_skewed"])<line_sep># %% [markdown] # ### Multicat equal # - Random is better than most common # %% f1_score_most_common(df["multicat_equal"] "cat1")<line_sep># %% f1_score_random(df["multicat_equal"])<line_sep># %% [markdown] # ### Multicat skewed # - Random is usually better than most common but they are in the same ballpark # %% f1_score_most_common(df["multicat_skewed"] "cat1")<line_sep># %% f1_score_random(df["multicat_skewed"])<line_sep>
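# %% [markdown]
# ### Best naive baseline (added example)
# - As argued above, a reasonable baseline is the maximum of the two naive scores; the sketch below reuses the helpers defined earlier.

# %%
def f1_score_best_naive(series):
    most_common = series.value_counts().idxmax()
    return max(f1_score_most_common(series, most_common), f1_score_random(series))

# %%
{col: f1_score_best_naive(df[col]) for col in df.columns}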
# -*- coding: utf-8 -*- <import_from_stmt>django.db models transaction IntegrityError<import_from_stmt>django.utils.timezone now<import_from_stmt>. app_settings<import_from_stmt>. utils<class_stmt>PasswordChangeManager(models.Manager)<block_start><def_stmt>get_or_create_for_user self user<block_start><return>self.get_or_create(user=user)<block_end><def_stmt>is_required_for_user self user<block_start>obj,created=self.get_or_create_for_user(user=user)<line_sep><return>obj.required<block_end><block_end><class_stmt>SessionManager(models.Manager)<block_start><def_stmt>active self user=<none><block_start>qs=self.filter(expiration_date__gt=now())<if_stmt>user<is><not><none><block_start>qs=qs.filter(user=user)<block_end><return>qs.order_by('-last_activity')<block_end><def_stmt>create_session self request user<block_start>ip=utils.resolve(app_settings.IP_RESOLVER request)<line_sep>device=utils.resolve(app_settings.DEVICE_RESOLVER request)<line_sep>location=utils.resolve(app_settings.LOCATION_RESOLVER request)<line_sep>user_agent=request.META.get('HTTP_USER_AGENT' '')<line_sep>user_agent=user_agent[:200]<if>user_agent<else>user_agent<try_stmt><block_start><with_stmt>transaction.atomic()<block_start>obj=self.create(user=user session_key=request.session.session_key ip=ip user_agent=user_agent device=device location=location expiration_date=request.session.get_expiry_date() last_activity=now())<block_end><block_end><except_stmt>IntegrityError<block_start>obj=self.get(user=user session_key=request.session.session_key)<line_sep>obj.last_activity=now()<line_sep>obj.save()<block_end><return>obj<block_end><block_end>
__version__='0.11.0'<line_sep>default_app_config='mjml.apps.MJMLConfig'<line_sep>
<import_stmt>sys<import_stmt>os<import_stmt>webbrowser<import_from_stmt>pathlib Path<import_from_stmt>jina Document Flow<def_stmt>config <block_start>os.environ.setdefault('JINA_USE_CUDA' 'False')<line_sep>os.environ.setdefault('JINA_PORT_EXPOSE' '8886')<line_sep>os.environ.setdefault('JINA_WORKSPACE' './workspace')<block_end><def_stmt>index file_name<block_start><def_stmt>load_marco fn<block_start>cnt=0<with_stmt>open(fn 'r')<as>f<block_start><for_stmt>ln,line enumerate(f)<block_start><try_stmt><block_start>title,para=line.strip().split('\t')<line_sep>doc=Document(id=f'{cnt}' uri=fn tags={'title':title 'para':para})<line_sep>cnt<augadd>1<line_sep><yield>doc<block_end><except_stmt><block_start>print(f'skip line {ln}')<line_sep><continue><block_end><block_end><block_end><block_end>f=Flow().load_config('flows/index.yml')<with_stmt>f<block_start>f.post(on='/index' inputs=load_marco(file_name) show_progress=<true> request_size=32)<block_end><block_end><def_stmt>fillin_html <block_start>source_fn=Path(__file__).parent.absolute()/'static/index_template.html'<line_sep>target_fn=Path(__file__).parent.absolute()/'static/index.html'<with_stmt>open(source_fn 'r')<as>fp open(target_fn 'w')<as>fw<block_start>t=fp.read()<line_sep>t=t.replace('{% JINA_PORT_EXPOSE %}' f'{os.environ.get("JINA_PORT_EXPOSE")}')<line_sep>fw.write(t)<block_end><block_end><def_stmt>query <block_start><import_from_stmt>distutils.dir_util copy_tree<line_sep>fillin_html()<line_sep>copy_tree('static' 'workspace/static')<line_sep>url_html_fn=Path(__file__).parent.absolute()/'workspace/static/index.html'<line_sep>url_html_path=f'file://{url_html_fn}'<line_sep>f=Flow().load_config('flows/query.yml')<with_stmt>f<block_start><try_stmt><block_start>webbrowser.open(url_html_path new=2)<block_end><except_stmt><block_start><pass><block_end><finally_stmt><block_start>print(f'You should see a demo page opened in your browser. '<concat>f'If not, you may open {url_html_path} manually')<block_end>f.block()<block_end><block_end><def_stmt>query_cli <block_start><def_stmt>print_topk resp<block_start><for_stmt>doc resp.docs<block_start>print(doc)<line_sep>doc=Document(doc)<line_sep>print(f'🤖 Answers:')<for_stmt>m doc.matches<block_start>print(f'\t{m.tags["title"]}')<line_sep>print(f'\t{m.tags["para"]}')<line_sep>print(f'-----')<block_end><block_end><block_end>f=Flow().load_config('flows/query.yml')<with_stmt>f<block_start>f.protocol='grpc'<line_sep>print(f'🤖 Hi there, please ask me questions related to the indexed Documents.\n'<concat>'For example, "Who is <NAME>\'s brother?"\n')<while_stmt><true><block_start>text=input('Question: (type `\q` to quit)')<if_stmt>text<eq>'\q'<or><not>text<block_start><return><block_end>f.post(on='/search' inputs=[Document(content=text) ] on_done=print_topk)<block_end><block_end><block_end><def_stmt>main task<block_start>config()<if_stmt>task<eq>'index'<block_start><if_stmt>Path('./workspace').exists()<block_start>print('./workspace exists, please delete it if you want to reindex')<block_end>data_fn=sys.argv[2]<if>len(sys.argv)<ge>3<else>'toy_data/test.tsv'<line_sep>print(f'indexing {data_fn}')<line_sep>index(data_fn)<block_end><elif_stmt>task<eq>'query'<block_start>query()<block_end><elif_stmt>task<eq>'query_cli'<block_start>query_cli()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>task=sys.argv[1]<line_sep>main(task)<block_end>
# author: Bartlomiej "furas" Burek (https://blog.furas.pl) # date: 2021.12.01 # # title: Scrape for Table with Limits # url: https://stackoverflow.com/questions/70179298/scrape-for-table-with-limits/70180875#70180875 # [Scrape for Table with Limits](https://stackoverflow.com/questions/70179298/scrape-for-table-with-limits/70180875#70180875) <import_stmt>requests<import_stmt>pandas<as>pd<line_sep># --- before loop --- url='https://api.nhle.com/stats/rest/en/team/daysbetweengames'<line_sep>payload={'isAggregate':'false' 'isGame':'true' 'start':0 'limit':100 'sort':'[{"property":"teamFullName","direction":"ASC"},{"property":"daysRest","direction":"DESC"},{"property":"teamId","direction":"ASC"}]' 'factCayenneExp':'gamesPlayed>=1' 'cayenneExp':'gameDate<="2021-11-30 23:59:59" and gameDate>="2021-10-12" and gameTypeId=2' }<line_sep>df=pd.DataFrame()<line_sep># --- loop --- <for_stmt>start range(0 1000 100)<block_start>print('--- start:' start '---')<line_sep>payload['start']=start<line_sep>response=requests.get(url params=payload)<line_sep>data=response.json()<line_sep>df=df.append(data['data'] ignore_index=<true>)<block_end># --- after loop --- print(df)<line_sep>df.to_excel('Master File.xlsx' sheet_name='Info')<line_sep>print(df.iloc[0])<line_sep>print(df.iloc[100])<line_sep>
# # Copyright (c) 2020 IBM Corp. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # arrow_conversion.py # # Part of text_extensions_for_pandas # # Provide Arrow compatible classes for serializing to pyarrow. # <import_from_stmt>distutils.version LooseVersion<import_stmt>numpy<as>np<import_stmt>pyarrow<as>pa<import_from_stmt>text_extensions_for_pandas.array.span SpanArray<import_from_stmt>text_extensions_for_pandas.array.token_span TokenSpanArray _EMPTY_SPAN_ARRAY_SINGLETON<import_from_stmt>text_extensions_for_pandas.array.tensor TensorArray<import_from_stmt>text_extensions_for_pandas.array.string_table StringTable<class_stmt>ArrowSpanType(pa.PyExtensionType)<block_start>""" PyArrow extension type definition for conversions to/from Span columns """<line_sep>BEGINS_NAME="span_begins"<line_sep>ENDS_NAME="span_ends"<line_sep>TARGET_TEXT_DICT_NAME="target_text"<def_stmt>__init__ self index_dtype target_text_dict_dtype<block_start>""" Create an instance of a Span data type with given index type and target text dictionary type. The dictionary type will hold target text ids that map to a dictionary of document target texts. :param index_dtype: type for the begin, end index arrays :param target_text_dict_dtype: type for the target text dictionary array """<assert_stmt>pa.types.is_integer(index_dtype)<assert_stmt>pa.types.is_dictionary(target_text_dict_dtype)<line_sep>fields=[pa.field(self.BEGINS_NAME index_dtype) pa.field(self.ENDS_NAME index_dtype) pa.field(self.TARGET_TEXT_DICT_NAME target_text_dict_dtype)]<line_sep>pa.PyExtensionType.__init__(self pa.struct(fields))<block_end><def_stmt>__reduce__ self<block_start>index_dtype=self.storage_type[self.BEGINS_NAME].type<line_sep>target_text_dict_dtype=self.storage_type[self.TARGET_TEXT_DICT_NAME].type<line_sep><return>ArrowSpanType (index_dtype target_text_dict_dtype)<block_end><block_end><class_stmt>ArrowTokenSpanType(pa.PyExtensionType)<block_start>""" PyArrow extension type definition for conversions to/from TokenSpan columns """<line_sep>BEGINS_NAME="token_begins"<line_sep>ENDS_NAME="token_ends"<line_sep>TOKENS_NAME="tokens"<def_stmt>__init__ self index_dtype token_dict_dtype<block_start>""" Create an instance of a TokenSpan data type with given index type and target text that will be stored in Field metadata. 
:param index_dtype: type for the begin, end index arrays :param token_dict_dtype: type for the tokens dictionary array """<assert_stmt>pa.types.is_integer(index_dtype)<assert_stmt>pa.types.is_dictionary(token_dict_dtype)<line_sep>fields=[pa.field(self.BEGINS_NAME index_dtype) pa.field(self.ENDS_NAME index_dtype) pa.field(self.TOKENS_NAME token_dict_dtype) ]<line_sep>pa.PyExtensionType.__init__(self pa.struct(fields))<block_end><def_stmt>__reduce__ self<block_start>index_dtype=self.storage_type[self.BEGINS_NAME].type<line_sep>token_dict_dtype=self.storage_type[self.TOKENS_NAME].type<line_sep><return>ArrowTokenSpanType (index_dtype token_dict_dtype)<block_end><block_end><def_stmt>span_to_arrow char_span:SpanArray<arrow>pa.ExtensionArray<block_start>""" Convert a SpanArray to a pyarrow.ExtensionArray with a type of ArrowSpanType and struct as the storage type. The resulting extension array can be serialized and transferred with standard Arrow protocols. :param char_span: A SpanArray to be converted :return: pyarrow.ExtensionArray containing Span data """<if_stmt>LooseVersion(pa.__version__)<l>LooseVersion("2.0.0")<block_start><raise>NotImplementedError("Arrow serialization for SpanArray is not supported with "<concat>"PyArrow versions < 2.0.0")<block_end># Create array for begins, ends begins_array=pa.array(char_span.begin)<line_sep>ends_array=pa.array(char_span.end)<line_sep># Create a dictionary array from StringTable used in this span dictionary=pa.array([char_span._string_table.unbox(s)<for>s char_span._string_table.things])<line_sep>target_text_dict_array=pa.DictionaryArray.from_arrays(char_span._text_ids dictionary)<line_sep>typ=ArrowSpanType(begins_array.type target_text_dict_array.type)<line_sep>fields=list(typ.storage_type)<line_sep>storage=pa.StructArray.from_arrays([begins_array ends_array target_text_dict_array] fields=fields)<line_sep><return>pa.ExtensionArray.from_storage(typ storage)<block_end><def_stmt>arrow_to_span extension_array:pa.ExtensionArray<arrow>SpanArray<block_start>""" Convert a pyarrow.ExtensionArray with type ArrowSpanType to a SpanArray. 
..NOTE: Only supported with PyArrow >= 2.0.0 :param extension_array: pyarrow.ExtensionArray with type ArrowSpanType :return: SpanArray """<if_stmt>LooseVersion(pa.__version__)<l>LooseVersion("2.0.0")<block_start><raise>NotImplementedError("Arrow serialization for SpanArray is not supported with "<concat>"PyArrow versions < 2.0.0")<block_end><if_stmt>isinstance(extension_array pa.ChunkedArray)<block_start><if_stmt>extension_array.num_chunks<g>1<block_start><raise>ValueError("Only pyarrow.Array with a single chunk is supported")<block_end>extension_array=extension_array.chunk(0)<block_end># NOTE: workaround for bug in parquet reading <if_stmt>pa.types.is_struct(extension_array.type)<block_start>index_dtype=extension_array.field(ArrowSpanType.BEGINS_NAME).type<line_sep>target_text_dict_dtype=extension_array.field(ArrowSpanType.TARGET_TEXT_DICT_NAME).type<line_sep>extension_array=pa.ExtensionArray.from_storage(ArrowSpanType(index_dtype target_text_dict_dtype) extension_array)<block_end><assert_stmt>pa.types.is_struct(extension_array.storage.type)<line_sep># Create target text StringTable and text_ids from dictionary array target_text_dict_array=extension_array.storage.field(ArrowSpanType.TARGET_TEXT_DICT_NAME)<line_sep>table_texts=target_text_dict_array.dictionary.to_pylist()<line_sep>string_table=StringTable.from_things(table_texts)<line_sep>text_ids=target_text_dict_array.indices.to_numpy()<line_sep># Get the begins/ends pyarrow arrays begins_array=extension_array.storage.field(ArrowSpanType.BEGINS_NAME)<line_sep>ends_array=extension_array.storage.field(ArrowSpanType.ENDS_NAME)<line_sep># Zero-copy convert arrays to numpy begins=begins_array.to_numpy()<line_sep>ends=ends_array.to_numpy()<line_sep><return>SpanArray((string_table text_ids) begins ends)<block_end><def_stmt>token_span_to_arrow token_span:TokenSpanArray<arrow>pa.ExtensionArray<block_start>""" Convert a TokenSpanArray to a pyarrow.ExtensionArray with a type of ArrowTokenSpanType and struct as the storage type. The resulting extension array can be serialized and transferred with standard Arrow protocols. 
:param token_span: A TokenSpanArray to be converted :return: pyarrow.ExtensionArray containing TokenSpan data """<if_stmt>LooseVersion(pa.__version__)<l>LooseVersion("2.0.0")<block_start><raise>NotImplementedError("Arrow serialization for TokenSpanArray is not supported with "<concat>"PyArrow versions < 2.0.0")<block_end># Create arrays for begins/ends token_begins_array=pa.array(token_span.begin_token)<line_sep>token_ends_array=pa.array(token_span.end_token)<line_sep># Filter out any empty SpanArrays non_null_tokens=token_span.tokens[~token_span.isna()]<assert_stmt>len(non_null_tokens)<g>0<line_sep># Get either single document as a list or use a list of all if multiple docs <if_stmt>all([token<is>non_null_tokens[0]<for>token non_null_tokens])<block_start>tokens_arrays=[non_null_tokens[0]]<line_sep>tokens_indices=pa.array([0]<times>len(token_span.tokens) mask=token_span.isna())<block_end><else_stmt><block_start><raise>NotImplementedError("TokenSpan Multi-doc serialization not yet implemented due to "<concat>"ArrowNotImplementedError: Concat with dictionary unification NYI")<line_sep>tokens_arrays=non_null_tokens<line_sep>tokens_indices=np.zeros_like(token_span.tokens)<line_sep>tokens_indices[~token_span.isna()]=range(len(tokens_arrays))<line_sep>tokens_indices=pa.array(tokens_indices mask=token_span.isna())<block_end># Convert each token SpanArray to Arrow and get as raw storage arrow_tokens_arrays=[span_to_arrow(sa).storage<for>sa tokens_arrays]<line_sep># Create a list array with each element is an ArrowSpanArray # TODO: pyarrow.lib.ArrowNotImplementedError: ('Sequence converter for type dictionary<values=string, indices=int8, ordered=0> not implemented', 'Conversion failed for column ts1 with type TokenSpanDtype') #arrow_tokens_arrays_array = pa.array(arrow_tokens_arrays, type=pa.list_(arrow_tokens_arrays[0].type)) offsets=[0]+[len(a)<for>a arrow_tokens_arrays]<line_sep>values=pa.concat_arrays(arrow_tokens_arrays)# TODO: can't concat extension arrays? arrow_tokens_arrays_array=pa.ListArray.from_arrays(offsets values)<line_sep># Create a dictionary array mapping each token SpanArray index used to the list of ArrowSpanArrays tokens_dict_array=pa.DictionaryArray.from_arrays(tokens_indices arrow_tokens_arrays_array)<line_sep>typ=ArrowTokenSpanType(token_begins_array.type tokens_dict_array.type)<line_sep>fields=list(typ.storage_type)<line_sep>storage=pa.StructArray.from_arrays([token_begins_array token_ends_array tokens_dict_array] fields=fields)<line_sep><return>pa.ExtensionArray.from_storage(typ storage)<block_end><def_stmt>arrow_to_token_span extension_array:pa.ExtensionArray<arrow>TokenSpanArray<block_start>""" Convert a pyarrow.ExtensionArray with type ArrowTokenSpanType to a TokenSpanArray. 
:param extension_array: pyarrow.ExtensionArray with type ArrowTokenSpanType :return: TokenSpanArray """<if_stmt>LooseVersion(pa.__version__)<l>LooseVersion("2.0.0")<block_start><raise>NotImplementedError("Arrow serialization for TokenSpanArray is not supported with "<concat>"PyArrow versions < 2.0.0")<block_end><if_stmt>isinstance(extension_array pa.ChunkedArray)<block_start><if_stmt>extension_array.num_chunks<g>1<block_start><raise>ValueError("Only pyarrow.Array with a single chunk is supported")<block_end>extension_array=extension_array.chunk(0)<block_end><assert_stmt>pa.types.is_struct(extension_array.storage.type)<line_sep># Get the begins/ends pyarrow arrays token_begins_array=extension_array.storage.field(ArrowTokenSpanType.BEGINS_NAME)<line_sep>token_ends_array=extension_array.storage.field(ArrowTokenSpanType.ENDS_NAME)<line_sep># Get the tokens as a dictionary array where indices map to a list of ArrowSpanArrays tokens_dict_array=extension_array.storage.field(ArrowTokenSpanType.TOKENS_NAME)<line_sep>tokens_indices=tokens_dict_array.indices<line_sep>arrow_tokens_arrays_array=tokens_dict_array.dictionary<line_sep># Breakup the list of ArrowSpanArrays and convert back to individual SpanArrays tokens_arrays=[]<line_sep>span_type=<none><for_stmt>i range(1 len(arrow_tokens_arrays_array.offsets))<block_start>start=arrow_tokens_arrays_array.offsets[i-1].as_py()<line_sep>stop=arrow_tokens_arrays_array.offsets[i].as_py()<line_sep>arrow_tokens_array=arrow_tokens_arrays_array.values[start:stop]<line_sep># Make an instance of ArrowSpanType <if_stmt>span_type<is><none><block_start>begins_array=arrow_tokens_array.field(ArrowSpanType.BEGINS_NAME)<line_sep>target_text_dict_array=arrow_tokens_array.field(ArrowSpanType.TARGET_TEXT_DICT_NAME)<line_sep>span_type=ArrowSpanType(begins_array.type target_text_dict_array.type)<block_end># Re-make the Arrow extension type to convert back to a SpanArray tokens_array=arrow_to_span(pa.ExtensionArray.from_storage(span_type arrow_tokens_array))<line_sep>tokens_arrays.append(tokens_array)<block_end># Map the token indices to the actual token SpanArray for each element in the TokenSpanArray tokens=[_EMPTY_SPAN_ARRAY_SINGLETON<if>i<is><none><else>tokens_arrays[i]<for>i tokens_indices.to_pylist()]<line_sep># Zero-copy convert arrays to numpy token_begins=token_begins_array.to_numpy()<line_sep>token_ends=token_ends_array.to_numpy()<line_sep><return>TokenSpanArray(tokens token_begins token_ends)<block_end><class_stmt>ArrowTensorType(pa.PyExtensionType)<block_start>""" pyarrow ExtensionType definition for TensorDtype :param element_shape: Fixed shape for each tensor element of the array, the outer dimension is the number of elements, or length, of the array. """<def_stmt>__init__ self element_shape pyarrow_dtype<block_start>self._element_shape=element_shape<line_sep>pa.PyExtensionType.__init__(self pa.list_(pyarrow_dtype))<block_end><def_stmt>__reduce__ self<block_start><return>ArrowTensorType (self._element_shape self.storage_type.value_type)<block_end>@property<def_stmt>shape self<block_start><return>self._element_shape<block_end><def_stmt>__arrow_ext_class__ self<block_start><return>ArrowTensorArray<block_end><block_end><class_stmt>ArrowTensorArray(pa.ExtensionArray)<block_start>""" A batch of tensors with fixed shape. 
"""<def_stmt>__init__ self<block_start><raise>TypeError("Do not call ArrowTensorBatch constructor directly, "<concat>"use one of the `ArrowTensorBatch.from_*` functions "<concat>"instead.")<block_end>@staticmethod<def_stmt>from_numpy obj batch_size=<none><block_start>""" Convert a list of numpy.ndarrays with equal shapes or as single numpy.ndarray with outer-dim as batch size to a pyarrow.Array """<if_stmt>isinstance(obj (list tuple))<block_start><if_stmt>batch_size<is><not><none><block_start><def_stmt>list_gen <block_start><for_stmt>i range(0 len(obj) batch_size)<block_start>slc=obj[i:i+batch_size]<line_sep><yield>ArrowTensorArray.from_numpy(slc batch_size=<none>)<block_end><block_end><return>list_gen()<block_end><elif_stmt>np.isscalar(obj[0])<block_start><return>pa.array(obj)<block_end><elif_stmt>isinstance(obj[0] np.ndarray)# continue with batched ndarray <block_start>obj=np.stack(obj axis=0)<block_end><block_end><if_stmt>isinstance(obj dict)<block_start>names=list(obj.keys())<line_sep>arrs=[ArrowTensorArray.from_numpy(obj[k] batch_size=batch_size)<for>k names]<line_sep>batch=pa.RecordBatch.from_arrays(arrs names)<line_sep><return>pa.Table.from_batches([batch])<block_end><elif_stmt>isinstance(obj np.ndarray)# currently require contiguous ndarray <block_start><if_stmt><not>obj.flags.c_contiguous<block_start>obj=np.ascontiguousarray(obj)<block_end>pa_dtype=pa.from_numpy_dtype(obj.dtype)<line_sep>batch_size=obj.shape[0]<line_sep>element_shape=obj.shape[1:]<line_sep>total_num_elements=obj.size<line_sep>num_elements=1<if>len(obj.shape)<eq>1<else>np.prod(element_shape)<line_sep>child_buf=pa.py_buffer(obj)<line_sep>child_array=pa.Array.from_buffers(pa_dtype total_num_elements [<none> child_buf])<line_sep>offset_buf=pa.py_buffer(np.int32([i<times>num_elements<for>i range(batch_size+1)]))<line_sep>storage=pa.Array.from_buffers(pa.list_(pa_dtype) batch_size [<none> offset_buf] children=[child_array])<line_sep>typ=ArrowTensorType(element_shape pa_dtype)<line_sep><return>pa.ExtensionArray.from_storage(typ storage)<block_end><elif_stmt>np.isscalar(obj)<block_start><return>pa.array([obj])<block_end><else_stmt><block_start><def_stmt>iter_gen <block_start><if_stmt>batch_size<is><none><block_start><for_stmt>d obj<block_start><yield>ArrowTensorArray.from_numpy(d batch_size=batch_size)<block_end><block_end><else_stmt><block_start>batch=[]<for_stmt>o obj<block_start>batch.append(o)<if_stmt>len(batch)<eq>batch_size# merge dict <block_start><if_stmt>isinstance(batch[0] dict)<block_start>d={k:[v]<for>k,v batch[0].items()}<for_stmt>i range(1 len(batch))<block_start><for_stmt>k,v batch[i].items()<block_start>d[k].append(v)<block_end><block_end><for_stmt>k d.keys()<block_start>d[k]=np.stack(d[k] axis=0)<block_end>batch=d<block_end><yield>ArrowTensorArray.from_numpy(batch batch_size=<none>)<line_sep>batch=[]<block_end><block_end><block_end><block_end><return>iter_gen()<block_end><block_end><def_stmt>to_numpy self<block_start>shape=(len(self) )+self.type.shape<line_sep>buf=self.storage.buffers()[3]<line_sep>storage_list_type=self.storage.type<line_sep>ext_dtype=storage_list_type.value_type.to_pandas_dtype()<line_sep><return>np.ndarray(shape buffer=buf dtype=ext_dtype)<block_end><block_end><def_stmt>arrow_to_tensor_array extension_array:pa.ExtensionArray<arrow>TensorArray<block_start>""" Convert a pyarrow.ExtensionArray with type ArrowTensorType to a TensorArray. 
:param extension_array: pyarrow.ExtensionArray with type ArrowTensorType :return: TensorArray """<if_stmt>isinstance(extension_array pa.ChunkedArray)<block_start><if_stmt>extension_array.num_chunks<g>1# TODO: look into removing concat and constructing from list w/ shape <block_start>values=np.concatenate([chunk.to_numpy()<for>chunk extension_array.iterchunks()])<block_end><else_stmt><block_start>values=extension_array.chunk(0).to_numpy()<block_end><block_end><else_stmt><block_start>values=extension_array.to_numpy()<block_end><return>TensorArray(values)<block_end>
module_attribute="hello!"<def_stmt>extension app<block_start>"""This extension will work"""<line_sep><return>"extension loaded"<block_end><def_stmt>assertive_extension app<block_start>"""This extension won't work"""<assert_stmt><false><block_end>
<import_stmt>numpy<as>np<import_stmt>astropy.table<import_from_stmt>vaex.dataset DatasetArrays<import_from_stmt>vaex.dataset_misc _try_unit<class_stmt>DatasetAstropyTable(DatasetArrays)<block_start><def_stmt>__init__ self filename=<none> format=<none> table=<none> **kwargs<block_start>self.ucds={}<line_sep>self.units={}<line_sep>self.descriptions={}<line_sep>self.kwargs=kwargs# extra keyword arguments forwarded to astropy.table.Table.read <line_sep>columns={}<if_stmt>table<is><none><block_start>self.filename=filename<line_sep>self.format=format<line_sep>self.read_table()<block_end><else_stmt><block_start>self.description=table.meta.get("description")<line_sep>self.table=table<block_end><for_stmt>i range(len(self.table.dtype))<block_start>name=self.table.dtype.names[i]<line_sep>column=self.table[name]<line_sep>type=self.table.dtype[i]<if_stmt>type.kind<in>"fiuSU"# only store float and int <block_start>masked_array=self.table[name].data<if_stmt>"ucd"<in>column._meta<block_start>self.ucds[name]=column._meta["ucd"]<block_end><if_stmt>column.unit<block_start>unit=_try_unit(column.unit)<if_stmt>unit<block_start>self.units[name]=unit<block_end><block_end><if_stmt>column.description<block_start>self.descriptions[name]=column.description<block_end><if_stmt>hasattr(masked_array "mask")<block_start><if_stmt>type.kind<in>["f"]<block_start>masked_array.data[masked_array.mask]=np.nan<block_end><if_stmt>type.kind<in>["i"]<block_start>masked_array.data[masked_array.mask]=0<block_end><block_end>columns[name]=self.table[name].data<block_end><if_stmt>type.kind<in>["S" "U"]<block_start>columns[name]=self.table[name].data<block_end><block_end>super().__init__(columns)<block_end><def_stmt>read_table self<block_start>self.table=astropy.table.Table.read(self.filename format=self.format **self.kwargs)<block_end><block_end>
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. <import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401 <import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>MonitorConfiguration(object)<block_start>""" Details of monitor configuration. """<line_sep>#: A constant which can be used with the config_type property of a MonitorConfiguration. #: This constant has a value of "BROWSER_CONFIG" CONFIG_TYPE_BROWSER_CONFIG="BROWSER_CONFIG"<line_sep>#: A constant which can be used with the config_type property of a MonitorConfiguration. #: This constant has a value of "SCRIPTED_BROWSER_CONFIG" CONFIG_TYPE_SCRIPTED_BROWSER_CONFIG="SCRIPTED_BROWSER_CONFIG"<line_sep>#: A constant which can be used with the config_type property of a MonitorConfiguration. #: This constant has a value of "REST_CONFIG" CONFIG_TYPE_REST_CONFIG="REST_CONFIG"<line_sep>#: A constant which can be used with the config_type property of a MonitorConfiguration. #: This constant has a value of "SCRIPTED_REST_CONFIG" CONFIG_TYPE_SCRIPTED_REST_CONFIG="SCRIPTED_REST_CONFIG"<def_stmt>__init__ self **kwargs<block_start>""" Initializes a new MonitorConfiguration object with values from keyword arguments. This class has the following subclasses and if you are using this class as input to a service operations then you should favor using a subclass over the base class: * :class:`~oci.apm_synthetics.models.ScriptedRestMonitorConfiguration` * :class:`~oci.apm_synthetics.models.ScriptedBrowserMonitorConfiguration` * :class:`~oci.apm_synthetics.models.RestMonitorConfiguration` * :class:`~oci.apm_synthetics.models.BrowserMonitorConfiguration` The following keyword arguments are supported (corresponding to the getters/setters of this class): :param config_type: The value to assign to the config_type property of this MonitorConfiguration. Allowed values for this property are: "BROWSER_CONFIG", "SCRIPTED_BROWSER_CONFIG", "REST_CONFIG", "SCRIPTED_REST_CONFIG", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type config_type: str :param is_failure_retried: The value to assign to the is_failure_retried property of this MonitorConfiguration. :type is_failure_retried: bool """<line_sep>self.swagger_types={'config_type':'str' 'is_failure_retried':'bool'}<line_sep>self.attribute_map={'config_type':'configType' 'is_failure_retried':'isFailureRetried'}<line_sep>self._config_type=<none><line_sep>self._is_failure_retried=<none><block_end>@staticmethod<def_stmt>get_subtype object_dictionary<block_start>""" Given the hash representation of a subtype of this class, use the info in the hash to return the class of the subtype. 
"""<line_sep>type=object_dictionary['configType']<if_stmt>type<eq>'SCRIPTED_REST_CONFIG'<block_start><return>'ScriptedRestMonitorConfiguration'<block_end><if_stmt>type<eq>'SCRIPTED_BROWSER_CONFIG'<block_start><return>'ScriptedBrowserMonitorConfiguration'<block_end><if_stmt>type<eq>'REST_CONFIG'<block_start><return>'RestMonitorConfiguration'<block_end><if_stmt>type<eq>'BROWSER_CONFIG'<block_start><return>'BrowserMonitorConfiguration'<block_end><else_stmt><block_start><return>'MonitorConfiguration'<block_end><block_end>@property<def_stmt>config_type self<block_start>""" Gets the config_type of this MonitorConfiguration. Type of configuration. Allowed values for this property are: "BROWSER_CONFIG", "SCRIPTED_BROWSER_CONFIG", "REST_CONFIG", "SCRIPTED_REST_CONFIG", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The config_type of this MonitorConfiguration. :rtype: str """<line_sep><return>self._config_type<block_end>@config_type.setter<def_stmt>config_type self config_type<block_start>""" Sets the config_type of this MonitorConfiguration. Type of configuration. :param config_type: The config_type of this MonitorConfiguration. :type: str """<line_sep>allowed_values=["BROWSER_CONFIG" "SCRIPTED_BROWSER_CONFIG" "REST_CONFIG" "SCRIPTED_REST_CONFIG"]<if_stmt><not>value_allowed_none_or_none_sentinel(config_type allowed_values)<block_start>config_type='UNKNOWN_ENUM_VALUE'<block_end>self._config_type=config_type<block_end>@property<def_stmt>is_failure_retried self<block_start>""" Gets the is_failure_retried of this MonitorConfiguration. If isFailureRetried is enabled, then a failed call will be retried. :return: The is_failure_retried of this MonitorConfiguration. :rtype: bool """<line_sep><return>self._is_failure_retried<block_end>@is_failure_retried.setter<def_stmt>is_failure_retried self is_failure_retried<block_start>""" Sets the is_failure_retried of this MonitorConfiguration. If isFailureRetried is enabled, then a failed call will be retried. :param is_failure_retried: The is_failure_retried of this MonitorConfiguration. :type: bool """<line_sep>self._is_failure_retried=is_failure_retried<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end>
<import_stmt>json<import_stmt>requests<class_stmt>RPCProxy<block_start><def_stmt>__init__ self client name<block_start>self.client=client<line_sep>self.name=name<block_end><def_stmt>__call__ self **kwargs<block_start>resp=self.client.session.post(self.client.endpoint data=json.dumps(kwargs) headers={'X-Rpc-Action':self.name 'Content-Type':'application/json' } )<line_sep><return>resp.json()<block_end><block_end><class_stmt>RPCClient<block_start><def_stmt>__init__ self endpoint<block_start>self.endpoint=endpoint<line_sep>self.session=requests.Session()<block_end><def_stmt>__getattr__ self key<block_start><return>RPCProxy(self key)<block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>btagMC_QCD_380_470=cms.EDFilter("BTagSkimMC" mcProcess=cms.string('QCD') pthat_min=cms.double(380.0) verbose=cms.untracked.bool(<false>) pthat_max=cms.double(470.0))<line_sep>
<import_stmt>sshpubkeys<import_stmt>unittest<class_stmt>UtilTestCase(unittest.TestCase)<block_start><def_stmt>test_ssh_key_bits_and_fingerprint self<block_start><with_stmt>self.assertRaises(sshpubkeys.InvalidKeyException)<block_start>sshpubkeys.SSHKey('an invalid key string')<block_end>valid_ssh_key='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3uta/x/kAwbs2G7AOUQtRG7l1hjEws4mrvnTZmwICoGNi+TUwxerZgMbBBID7Kpza/ZSUqXpKX5gppRW9zECBsbJ+2D0ch/oVSZ408aUE6ePNzJilLA/2wtRct/bkHDZOVI+iwEEr1IunjceF+ZQxnylUv44C6SgZvrDj+38hz8z1Vf4BtW5jGOhHkddTadU7Nn4jQR3aFXMoheuu/vHYD2OyDJj/r6vh9x5ey8zFmwsGDtFCCzzLgcfPYfOdDxFIWhsopebnH3QHVcs/E0KqhocsEdFDRvcFgsDCKwmtHyZVAOKym2Pz9TfnEdGeb+eKrleZVsApFrGtSIfcf4pH user@host'<line_sep>ssh_key=sshpubkeys.SSHKey(valid_ssh_key)<line_sep>self.assertEqual(ssh_key.bits 2048)<line_sep>self.assertEqual(ssh_key.hash() '73:e7:0c:60:7b:d2:7b:df:81:2e:c2:57:54:53:81:91')<block_end><block_end>
<import_stmt>cv2<line_sep>img=cv2.imread('./images/input.jpg')<line_sep>cv2.imshow('Input image' img)<line_sep>cv2.waitKey()<line_sep>
<import_from_stmt>flask current_app jsonify request Response<import_from_stmt>flask.views View<import_from_stmt>sqlalchemy.orm joinedload<import_from_stmt>zeus auth<import_from_stmt>zeus.api.resources.base ApiHelpers<import_from_stmt>zeus.config nplusone<import_from_stmt>zeus.constants Permission<import_from_stmt>zeus.exceptions ApiError<import_from_stmt>zeus.models Hook<class_stmt>BaseHook(View ApiHelpers)<block_start>public=<false><line_sep>methods=["GET" "POST" "PUT" "DELETE"]<def_stmt>dispatch_request self hook_id signature=<none> *args **kwargs<arrow>Response<block_start>current_app.logger.info("received webhook id=%s" hook_id)<with_stmt>nplusone.ignore("eager_load")<block_start>hook=(Hook.query.unrestricted_unsafe().options(joinedload("repository")).get(hook_id))<block_end><if_stmt><not>hook<block_start><return>self.respond({"message":"hook not found"} 404)<block_end><if_stmt><not>self.public<and><not>hook.is_valid_signature(signature)<block_start>current_app.logger.warn("invalid webhook signature id=%s" hook_id)<line_sep><return>self.respond({"message":"hook not found"} 404)<block_end><try_stmt><block_start>method=getattr(self request.method.lower())<block_end><except_stmt>AttributeError<block_start>current_app.logger.warn("invalid webhook method id=%s, method=%s" hook_id request.method)<line_sep><return>self.respond({"message":"resource not found"} 405)<block_end>auth.set_current_tenant(auth.RepositoryTenant(repository_id=hook.repository_id permission=Permission.admin))<try_stmt><block_start>resp=method(hook *args **kwargs)<block_end><except_stmt>ApiError<as>exc<block_start><return>self.respond(exc.json<or>{} exc.code<or>500)<block_end><if_stmt>isinstance(resp Response)<block_start><return>resp<block_end><return>self.respond(resp)<block_end><def_stmt>respond self context:dict={} status:int=200<arrow>Response<block_start>resp=jsonify(context)<line_sep>resp.status_code=status<line_sep><return>resp<block_end><block_end>
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>math<import_stmt>paddle<def_stmt>piecewise_decay net device_num args<block_start>step=int(math.ceil(float(args.total_images)/(args.batch_size<times>device_num)))<line_sep>bd=[step<times>e<for>e args.step_epochs]<line_sep>lr=[args.lr<times>(0.1<power>i)<for>i range(len(bd)+1)]<line_sep>learning_rate=paddle.optimizer.lr.PiecewiseDecay(boundaries=bd values=lr verbose=<false>)<line_sep>optimizer=paddle.optimizer.Momentum(parameters=net.parameters() learning_rate=learning_rate momentum=args.momentum_rate weight_decay=paddle.regularizer.L2Decay(args.l2_decay))<line_sep><return>optimizer learning_rate<block_end><def_stmt>cosine_decay net device_num args<block_start>step=int(math.ceil(float(args.total_images)/(args.batch_size<times>device_num)))<line_sep>learning_rate=paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=args.lr T_max=step<times>args.num_epochs verbose=<false>)<line_sep>optimizer=paddle.optimizer.Momentum(parameters=net.parameters() learning_rate=learning_rate momentum=args.momentum_rate weight_decay=paddle.regularizer.L2Decay(args.l2_decay))<line_sep><return>optimizer learning_rate<block_end><def_stmt>create_optimizer net device_num args<block_start><if_stmt>args.lr_strategy<eq>"piecewise_decay"<block_start><return>piecewise_decay(net device_num args)<block_end><elif_stmt>args.lr_strategy<eq>"cosine_decay"<block_start><return>cosine_decay(net device_num args)<block_end><block_end>
<import_from_stmt>autoPyTorch.core.autonet_classes.autonet_image_classification AutoNetImageClassification<class_stmt>AutoNetImageClassificationMultipleDatasets(AutoNetImageClassification)<block_start>preset_folder_name="image_classification_multiple_datasets"<line_sep>@classmethod<def_stmt>get_default_pipeline cls<block_start><import_from_stmt>autoPyTorch.pipeline.base.pipeline Pipeline<import_from_stmt>autoPyTorch.pipeline.nodes.image.optimization_algorithm_no_timelimit OptimizationAlgorithmNoTimeLimit<import_from_stmt>autoPyTorch.pipeline.nodes.optimizer_selector OptimizerSelector<import_from_stmt>autoPyTorch.pipeline.nodes.log_functions_selector LogFunctionsSelector<import_from_stmt>autoPyTorch.pipeline.nodes.metric_selector MetricSelector<import_from_stmt>autoPyTorch.pipeline.nodes.image.simple_scheduler_selector SimpleLearningrateSchedulerSelector<import_from_stmt>autoPyTorch.pipeline.nodes.image.cross_validation_indices CrossValidationIndices<import_from_stmt>autoPyTorch.pipeline.nodes.image.autonet_settings_no_shuffle AutoNetSettingsNoShuffle<import_from_stmt>autoPyTorch.pipeline.nodes.image.network_selector_datasetinfo NetworkSelectorDatasetInfo<import_from_stmt>autoPyTorch.pipeline.nodes.image.loss_module_selector_indices LossModuleSelectorIndices<import_from_stmt>autoPyTorch.pipeline.nodes.image.image_augmentation ImageAugmentation<import_from_stmt>autoPyTorch.pipeline.nodes.image.create_image_dataloader CreateImageDataLoader<import_from_stmt>autoPyTorch.pipeline.nodes.image.create_dataset_info CreateDatasetInfo<import_from_stmt>autoPyTorch.pipeline.nodes.image.simple_train_node SimpleTrainNode<import_from_stmt>autoPyTorch.pipeline.nodes.image.multiple_datasets MultipleDatasets<import_from_stmt>autoPyTorch.pipeline.nodes.image.image_dataset_reader ImageDatasetReader<line_sep># build the pipeline pipeline=Pipeline([AutoNetSettingsNoShuffle() OptimizationAlgorithmNoTimeLimit([MultipleDatasets([ImageDatasetReader() CreateDatasetInfo() CrossValidationIndices([NetworkSelectorDatasetInfo() OptimizerSelector() SimpleLearningrateSchedulerSelector() LogFunctionsSelector() MetricSelector() LossModuleSelectorIndices() ImageAugmentation() CreateImageDataLoader() SimpleTrainNode()])])])])<line_sep>cls._apply_default_pipeline_settings(pipeline)<line_sep><return>pipeline<block_end><block_end>
<import_from_stmt>vtkmodules.vtkCommonCore vtkCommand<class_stmt>vtkErrorObserver(object)<block_start><def_stmt>__init__ self<block_start>self.CallDataType='string0'<line_sep>self.reset()<block_end><def_stmt>__call__ self caller event data<block_start><if_stmt>event<eq>'ErrorEvent'<block_start>self._error_message=data<block_end><elif_stmt>event<eq>'WarningEvent'<block_start>self._warning_message=data<block_end><block_end><def_stmt>_check self seen actual expect what<block_start><if_stmt>seen<block_start><if_stmt>actual.find(expect)<eq>-1<block_start>msg='ERROR: %s message does not contain "%s", got \n"%s"'%(what expect actual)<line_sep><raise>RuntimeError(msg)<block_end><block_end><else_stmt><block_start>what=what.lower()<line_sep>msg='ERROR: Failed to catch any %s. '<concat>'Expected the %s message to contain "%s"'%(what what expect)<line_sep><raise>RuntimeError(msg)<block_end>self.reset()<block_end><def_stmt>check_error self expect<block_start>self._check(self.saw_error self.error_message expect 'Error')<block_end><def_stmt>check_warning self expect<block_start>self._check(self.saw_warning self.warning_message expect 'Warning')<block_end><def_stmt>reset self<block_start>self._error_message=<none><line_sep>self._warning_message=<none><block_end>@property<def_stmt>saw_error self<block_start><return>self._error_message<is><not><none><block_end>@property<def_stmt>error_message self<block_start><return>self._error_message<block_end>@property<def_stmt>saw_warning self<block_start><return>self._warning_message<is><not><none><block_end>@property<def_stmt>warning_message self<block_start><return>self._warning_message<block_end><block_end>
<import_stmt>re<import_from_stmt>cort.core external_data<import_from_stmt>cort.core spans<import_from_stmt>cort.core util<line_sep>__author__='smartschat'<def_stmt>not_singleton anaphor antecedent<block_start>singleton_data=external_data.SingletonMentions.get_instance()<line_sep>anaphor=" ".join(anaphor.attributes["tokens"])<line_sep>antecedent=" ".join(antecedent.attributes["tokens"])<if_stmt>(anaphor<in>singleton_data.singletons<and>singleton_data.singletons[anaphor]<ge>25)<block_start><return><true><block_end><if_stmt>(antecedent<in>singleton_data.singletons<and>singleton_data.singletons[antecedent]<ge>25)<block_start><return><true><block_end><block_end><def_stmt>pronoun_parallelism anaphor antecedent<block_start><return>(anaphor.attributes["type"]<eq>"PRO"<and>(anaphor.attributes["citation_form"]<in>["he" "she" "it" "they"])<and>(antecedent.attributes["type"]<ne>"PRO"<or>(antecedent.attributes["citation_form"]<in>["he" "she" "it" "they"]))<and>(antecedent.attributes["grammatical_function"]<eq>anaphor.attributes["grammatical_function"])<and>(antecedent.attributes["grammatical_function"]<in>["SUBJECT" "OBJECT"]))<block_end><def_stmt>antecedent_is_subject anaphor antecedent<block_start><return>(anaphor.attributes["type"]<eq>"PRO"<and>(anaphor.attributes["citation_form"]<in>["he" "she" "it" "they"])<and>(antecedent.attributes["type"]<ne>"PRO"<or>(antecedent.attributes["citation_form"]<in>["he" "she" "it" "they"]))<and>antecedent.attributes["grammatical_function"]<eq>"SUBJECT")<block_end><def_stmt>antecedent_is_object anaphor antecedent<block_start><return>(anaphor.attributes["type"]<eq>"PRO"<and>(anaphor.attributes["citation_form"]<in>["he" "she" "it" "they"])<and>(antecedent.attributes["type"]<ne>"PRO"<or>(antecedent.attributes["citation_form"]<in>["he" "she" "it" "they"]))<and>antecedent.attributes["grammatical_function"]<eq>"OBJECT")<block_end><def_stmt>anaphor_pronoun anaphor antecedent<block_start><return>(anaphor.attributes["type"]<eq>"PRO"<and>(anaphor.attributes["citation_form"]<in>["he" "she" "it" "they"])<and>(antecedent.attributes["type"]<ne>"PRO"<or>(antecedent.attributes["citation_form"]<in>["he" "she" "it" "they"])))<block_end><def_stmt>lexical anaphor antecedent<block_start>lexical_data=external_data.LexicalData.get_instance()<if_stmt>((anaphor.attributes["type"]<eq>"NAM"<and>antecedent.attributes["type"]<eq>"NAM")<or>(anaphor.attributes["type"]<eq>"NOM"<and>anaphor.attributes["fine_type"]<eq>"DEF"<and>antecedent.attributes["type"]<in>["NAM" "NOM"]))<block_start><return>lexical_data.look_up(anaphor antecedent)<block_end><block_end><def_stmt>non_pronominal_string_match anaphor antecedent<block_start><if_stmt>anaphor.attributes["type"]<in>["PRO" "DEM" "VRB"]<block_start><return><false><block_end><elif_stmt>antecedent.attributes["type"]<in>["PRO" "DEM" "VRB"]<block_start><return><false><block_end><else_stmt><block_start><return>(" ".join(util.clean_via_pos(anaphor.attributes["tokens"] anaphor.attributes["pos"])).lower()<eq>" ".join(util.clean_via_pos(antecedent.attributes["tokens"] antecedent.attributes["pos"])).lower())<block_end><block_end><def_stmt>head_match anaphor antecedent<block_start><if_stmt>anaphor.attributes["type"]<in>["PRO" "DEM" "VRB"]<block_start><return><false><block_end><elif_stmt>antecedent.attributes["type"]<in>["PRO" "DEM" 
"VRB"]<block_start><return><false><block_end><elif_stmt>(anaphor.attributes["semantic_class"]<eq>"NUMERIC"<or>antecedent.attributes["semantic_class"]<eq>"NUMERIC")<block_start><return><false><block_end><else_stmt><block_start><return>(anaphor.attributes["head"]<ne>["and"]<and>(" ".join(anaphor.attributes["head"]).lower()<eq>" ".join(antecedent.attributes["head"]).lower()))<block_end><block_end><def_stmt>substring anaphor antecedent<block_start><if_stmt>anaphor.attributes["type"]<in>["PRO" "DEM" "VRB"]<block_start><return><false><block_end><elif_stmt>antecedent.attributes["type"]<ne>"NAM"<block_start><return><false><block_end><elif_stmt>(anaphor.attributes["semantic_class"]<eq>"NUMERIC"<or>antecedent.attributes["semantic_class"]<eq>"NUMERIC")<block_start><return><false><block_end><elif_stmt>anaphor.attributes["head"]<eq>["and"]<block_start><return><false><block_end><else_stmt><block_start>cleaned=util.clean_via_pos(anaphor.attributes["tokens"] anaphor.attributes["pos"])<line_sep><return>(" ".join(cleaned).lower()<in>" ".join(antecedent.attributes["tokens"]).lower())<block_end><block_end><def_stmt>pronoun_same_canonical_form anaphor antecedent<block_start><return>(anaphor.attributes["type"]<eq>"PRO"<and>antecedent.attributes["type"]<eq>"PRO"<and>(anaphor.attributes["citation_form"]<eq>antecedent.attributes["citation_form"]))<block_end><def_stmt>speaker anaphor antecedent<block_start>speaker_anaphor=anaphor.attributes["speaker"]<line_sep>speaker_antecedent=antecedent.attributes["speaker"]<if_stmt>speaker_anaphor<eq>"-"<and>speaker_antecedent<eq>"-"<block_start><return><false><block_end><else_stmt><block_start><if_stmt>(anaphor.attributes["type"]<eq>"PRO"<and>antecedent.attributes["type"]<eq>"PRO")<block_start><if_stmt>(anaphor.attributes["citation_form"]<eq>"i"<and>antecedent.attributes["citation_form"]<eq>"i")<block_start><return>speaker_anaphor<eq>speaker_antecedent<block_end><elif_stmt>((anaphor.attributes["citation_form"]<eq>"i"<and>antecedent.attributes["citation_form"]<eq>"you")<or>(anaphor.attributes["citation_form"]<eq>"you"<and>antecedent.attributes["citation_form"]<eq>"i"))<block_start><return>(nothing_between(anaphor antecedent)<and>speaker_anaphor<ne>speaker_antecedent)<block_end><block_end><elif_stmt>(anaphor.attributes["type"]<eq>"PRO"<or>antecedent.attributes["type"]<eq>"PRO")<block_start><if_stmt>(anaphor.attributes["type"]<eq>"PRO"<and>anaphor.attributes["citation_form"]<eq>"i")<block_start><return>(speaker_anaphor.replace("_" " ").lower()<in>[" ".join(antecedent.attributes["tokens"]).lower() " ".join(antecedent.attributes["head"]).lower()])<block_end><elif_stmt>(antecedent.attributes["type"]<eq>"PRO"<and>antecedent.attributes["citation_form"]<eq>"i")<block_start><return>(speaker_antecedent.replace("_" " ").lower()<in>[" ".join(anaphor.attributes["tokens"]).lower() " ".join(anaphor.attributes["head"]).lower()])<block_end><block_end><block_end><block_end><def_stmt>nothing_between anaphor antecedent<block_start><if_stmt><not>anaphor.document<block_start><return><true><block_end><if_stmt>anaphor.span<l>antecedent.span<block_start>start=anaphor.span.begin<line_sep>end=antecedent.span.end<block_end><else_stmt><block_start>start=antecedent.span.begin<line_sep>end=anaphor.span.end<block_end>speakers=anaphor.document.speakers[start:end+1]<line_sep>allowed_speakers=[speakers[0] speakers[-1]]<for_stmt>particular_speaker 
speakers<block_start><if_stmt>particular_speaker<not><in>allowed_speakers<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>not_anaphoric anaphor antecedent<block_start><return><not>(anaphor.attributes["type"]<in>["NAM" "PRO"]<or>(anaphor.attributes["type"]<eq>"NOM"<and>anaphor.attributes["fine_type"]<eq>"DEF"))<block_end><def_stmt>not_speaker anaphor antecedent<block_start>speaker_anaphor=anaphor.attributes["speaker"]<line_sep>speaker_antecedent=antecedent.attributes["speaker"]<if_stmt>speaker_anaphor<eq>"-"<or>speaker_antecedent<eq>"-"<block_start><return><false><block_end><else_stmt><block_start><if_stmt>(anaphor.attributes["type"]<eq>"PRO"<and>antecedent.attributes["type"]<eq>"PRO")<block_start><if_stmt>((anaphor.attributes["citation_form"]<eq>"i"<and>antecedent.attributes["citation_form"]<eq>"i")<or>(anaphor.attributes["citation_form"]<eq>"we"<and>antecedent.attributes["citation_form"]<eq>"we")<or>(anaphor.attributes["citation_form"]<eq>"you"<and>antecedent.attributes["citation_form"]<eq>"you"))<block_start><return>speaker_anaphor<ne>speaker_antecedent<block_end><elif_stmt>((anaphor.attributes["citation_form"]<eq>"i"<and>antecedent.attributes["citation_form"]<eq>"you")<or>(anaphor.attributes["citation_form"]<eq>"you"<and>antecedent.attributes["citation_form"]<eq>"i"))<block_start><return>speaker_anaphor<eq>speaker_antecedent<block_end><block_end><block_end><block_end><def_stmt>not_pronoun_distance anaphor antecedent<block_start><return>(anaphor.attributes["type"]<eq>"PRO"<and>anaphor.attributes["citation_form"]<eq>"it"<and>(anaphor.attributes["sentence_id"]-antecedent.attributes["sentence_id"]<g>1))<block_end><def_stmt>not_embedding anaphor antecedent<block_start><return>(antecedent.span.embeds(anaphor.span)<and>(anaphor.attributes["fine_type"]<not><in>["REFL" "POSS" "POSS_ADJ"]))<block_end><def_stmt>not_compatible anaphor antecedent<block_start><if_stmt>(" ".join(util.clean_via_pos(anaphor.attributes["tokens"] anaphor.attributes["pos"])).lower()<eq>" ".join(util.clean_via_pos(antecedent.attributes["tokens"] antecedent.attributes["pos"])).lower())<block_start><return><false><block_end>gender=(anaphor.attributes["gender"]<eq>"UNKNOWN"<or>antecedent.attributes["gender"]<eq>"UNKNOWN"<or>anaphor.attributes["gender"]<eq>antecedent.attributes["gender"])<line_sep>number=(anaphor.attributes["number"]<eq>"UNKNOWN"<or>antecedent.attributes["number"]<eq>"UNKNOWN"<or>anaphor.attributes["number"]<eq>antecedent.attributes["number"])<line_sep>semantic_class=(anaphor.attributes["semantic_class"]<eq>"UNKNOWN"<or>antecedent.attributes["semantic_class"]<eq>"UNKNOWN"<or>anaphor.attributes["semantic_class"]<eq>antecedent.attributes["semantic_class"])<line_sep><return><not>(gender<and>number<and>semantic_class)<block_end><def_stmt>not_modifier anaphor antecedent<block_start><if_stmt>(anaphor.attributes["type"]<eq>"NAM"<and>antecedent.attributes["type"]<eq>"NAM")<block_start><return><false><block_end><elif_stmt>(anaphor.attributes["type"]<in>["PRO" "DEM" "VRB"]<or>antecedent.attributes["type"]<in>["PRO" "DEM" "VRB"])<block_start><return><false><block_end><else_stmt><block_start><return><not>get_modifier(anaphor).issubset(get_modifier(antecedent))<block_end><block_end><def_stmt>get_modifier mention<block_start>head_span_in_mention=spans.Span(mention.attributes["head_span"].begin-mention.span.begin mention.attributes["head_span"].end-mention.span.begin)<line_sep>modifiers=set()<for_stmt>index,(token pos) enumerate(zip(mention.attributes["tokens"] 
mention.attributes["pos"]))<block_start><if_stmt>(token.lower()<not><in>["the" "this" "that" "those" "these" "a" "an"]<and>pos<not><in>["POS" "IN"]<and>(index<l>head_span_in_mention.begin<or>index<g>head_span_in_mention.end))<block_start>modifiers.add(token.lower())<block_end><block_end><return>modifiers<block_end><def_stmt>alias anaphor antecedent<block_start><if_stmt>(anaphor.attributes["type"]<ne>"NAM"<or>antecedent.attributes["type"]<ne>"NAM")<block_start><return><false><block_end><elif_stmt>(" ".join(anaphor.attributes["head"]).lower()<eq>" ".join(antecedent.attributes["head"]).lower())<block_start><return><false><block_end><else_stmt><block_start>anaphor_cleaned_tokens=anaphor.attributes["head"]<line_sep>antecedent_cleaned_tokens=antecedent.attributes["head"]<line_sep>category=get_category_for_alias(anaphor.attributes["ner"][anaphor.attributes["head_index"]] antecedent.attributes["ner"][antecedent.attributes["head_index"]])<if_stmt>category<eq>"PERSON"<block_start><return>person_alias(anaphor_cleaned_tokens antecedent_cleaned_tokens)<block_end><elif_stmt>category<eq>"LOC"<block_start><return>loc_alias(anaphor_cleaned_tokens antecedent_cleaned_tokens)<block_end><elif_stmt>category<eq>"ORG"<block_start><return>org_alias(anaphor_cleaned_tokens antecedent_cleaned_tokens)<block_end><else_stmt><block_start><return><false><block_end><block_end><block_end><def_stmt>get_category_for_alias anaphor_ner antecedent_ner<block_start><if_stmt>anaphor_ner<eq>"PERSON"<and>antecedent_ner<eq>"PERSON"<block_start><return>"PERSON"<block_end><elif_stmt>re.match(r"LOC" anaphor_ner)<and>re.match(r"LOC" antecedent_ner)<block_start><return>"LOC"<block_end><elif_stmt>re.match(r"ORG" anaphor_ner)<and>re.match(r"(ORG)" antecedent_ner)<block_start><return>"ORG"<block_end><block_end><def_stmt>loc_alias anaphor_cleaned_tokens antecedent_cleaned_tokens<block_start><return>(starts_with(anaphor_cleaned_tokens antecedent_cleaned_tokens)<or>is_abbreviation(anaphor_cleaned_tokens antecedent_cleaned_tokens))<block_end><def_stmt>org_alias anaphor_cleaned_tokens antecedent_cleaned_tokens<block_start><return>(starts_with(anaphor_cleaned_tokens antecedent_cleaned_tokens)<or>is_abbreviation(anaphor_cleaned_tokens antecedent_cleaned_tokens))<block_end><def_stmt>person_alias anaphor_cleaned_tokens antecedent_cleaned_tokens<block_start><if_stmt>len(anaphor_cleaned_tokens)<eq>1<or>len(antecedent_cleaned_tokens)<eq>1<block_start><return>(anaphor_cleaned_tokens[0]<eq>antecedent_cleaned_tokens[0]<or>anaphor_cleaned_tokens[-1]<eq>antecedent_cleaned_tokens[-1])<block_end><elif_stmt>(len(anaphor_cleaned_tokens)<eq>2<and>anaphor_cleaned_tokens[0].lower()<in>["mr" "ms" "mr." "ms."]<or>len(antecedent_cleaned_tokens)<eq>2<and>antecedent_cleaned_tokens[0].lower()<in>["mr" "ms" "mr." 
"ms."])<block_start><return>anaphor_cleaned_tokens[-1]<eq>antecedent_cleaned_tokens[-1]<block_end><elif_stmt>(anaphor_cleaned_tokens[0]<eq>antecedent_cleaned_tokens[0]<and>anaphor_cleaned_tokens[-1]<eq>antecedent_cleaned_tokens[-1])<block_start><return><true><block_end><elif_stmt>len(anaphor_cleaned_tokens)<g>1<and>len(antecedent_cleaned_tokens)<g>1<block_start><return>(anaphor_cleaned_tokens[-1]<eq>antecedent_cleaned_tokens[-1]<and>anaphor_cleaned_tokens[-2]<eq>antecedent_cleaned_tokens[-2])<block_end><return><false><block_end><def_stmt>starts_with anaphor_cleaned_tokens antecedent_cleaned_tokens<block_start><for_stmt>ana_token,ante_token zip(anaphor_cleaned_tokens antecedent_cleaned_tokens)<block_start><if_stmt>ana_token<ne>ante_token<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>is_abbreviation anaphor_cleaned_tokens antecedent_cleaned_tokens<block_start><if_stmt>(" ".join(anaphor_cleaned_tokens).replace("." "")<eq>" ".join(antecedent_cleaned_tokens).replace("." ""))<block_start><return><true><block_end><else_stmt><block_start><if_stmt>len(anaphor_cleaned_tokens)<g>len(antecedent_cleaned_tokens)<block_start><return>(" ".join(antecedent_cleaned_tokens)<in>set(get_acronyms(anaphor_cleaned_tokens)))<block_end><else_stmt><block_start><return>(" ".join(anaphor_cleaned_tokens)<in>set(get_acronyms(antecedent_cleaned_tokens)))<block_end><block_end><block_end><def_stmt>get_acronyms cleaned_tokens<block_start>company_designator=r'assoc|bros|co|coop|corp|devel|inc|llc|ltd\.?'<line_sep>tokens_without_designator=[token<for>token cleaned_tokens<if><not>re.match(company_designator token.lower())]<line_sep><return>(" ".join(tokens_without_designator) "".join([token[0]<for>token tokens_without_designator<if>token[0].isupper()]) ".".join([token[0]<for>token tokens_without_designator<if>token[0].isupper()])+".")<block_end>
<import_from_stmt>flask Flask jsonify request<import_from_stmt>flask_cors CORS<import_stmt>torch<import_from_stmt>transformers AutoTokenizer AutoConfig<import_from_stmt>modeling BertForSentimentClassification AlbertForSentimentClassification DistilBertForSentimentClassification<import_from_stmt>arguments args<line_sep>app=Flask(__name__)<line_sep>app.config.from_object(__name__)<line_sep># Enable CORS CORS(app resources={r'/*':{'origins':'*'}})<def_stmt>classify_sentiment sentence<block_start><with_stmt>torch.no_grad()<block_start>tokens=tokenizer.tokenize(sentence)<line_sep>tokens=['[CLS]']+tokens+['[SEP]']<line_sep>tokens_ids=tokenizer.convert_tokens_to_ids(tokens)<line_sep>seq=torch.tensor(tokens_ids)<line_sep>seq=seq.unsqueeze(0)<line_sep>attn_mask=(seq<ne>0).long()<line_sep>logit=model(seq attn_mask)<line_sep>prob=torch.sigmoid(logit.unsqueeze(-1))<line_sep>prob=prob.item()<line_sep>soft_prob=prob<g>0.5<if_stmt>soft_prob<eq>1<block_start><return>'Positive' int(prob<times>100)<block_end><else_stmt><block_start><return>'Negative' int(100-prob<times>100)<block_end><block_end><block_end>@app.route('/' methods=['GET'])<def_stmt>sentiment <block_start><if_stmt>request.method<eq>'GET'<block_start>text=request.args['text']<line_sep>sentiment,probability=classify_sentiment(text)<line_sep><return>jsonify({'sentiment':sentiment 'probability':probability})<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><if_stmt>args.model_name_or_path<is><none><block_start>args.model_name_or_path='barissayil/bert-sentiment-analysis-sst'<block_end>#Configuration for the desired transformer model config=AutoConfig.from_pretrained(args.model_name_or_path)<line_sep>#Create the model with the desired transformer model <if_stmt>config.model_type<eq>'bert'<block_start>model=BertForSentimentClassification.from_pretrained(args.model_name_or_path)<block_end><elif_stmt>config.model_type<eq>'albert'<block_start>model=AlbertForSentimentClassification.from_pretrained(args.model_name_or_path)<block_end><elif_stmt>config.model_type<eq>'distilbert'<block_start>model=DistilBertForSentimentClassification.from_pretrained(args.model_name_or_path)<block_end><else_stmt><block_start><raise>ValueError('This transformer model is not supported yet.')<block_end>device=torch.device("cuda:0"<if>torch.cuda.is_available()<else>"cpu")<line_sep>model=model.to(device)<line_sep>model.eval()<line_sep>#Initialize the tokenizer for the desired transformer model tokenizer=AutoTokenizer.from_pretrained(args.model_name_or_path)<line_sep>#Run the Flask App app.run()<block_end>
# ----------------------------------------------------------------------------- # Copyright (c) 2009-2016 <NAME>. All rights reserved. # Distributed under the (new) BSD License. # ----------------------------------------------------------------------------- """ Default argument parser for any glumpy program. """<import_stmt>argparse<import_stmt>glumpy.defaults<line_sep># Default parser __parser__=<none><def_stmt>get_default <block_start>""" Get the default parser. """<line_sep><global>__parser__<if_stmt>__parser__<is><none><block_start>__parser__=argparse.ArgumentParser()<line_sep>set_default_options(__parser__)<block_end><return>__parser__<block_end><def_stmt>get_options <block_start>""" Parse and return command line options. """<line_sep>options,unknown=get_default().parse_known_args()<line_sep><return>options<block_end><def_stmt>set_default_options parser<block_start>""" Set parser default options. """<line_sep># Backend option parser.add_argument("--backend" "-b" default=glumpy.defaults.backend() choices=('glfw' 'sdl2' 'qt5' 'pyside' 'pyglet' 'sdl' 'freeglut' 'osxglut') help="Backend to use")<line_sep># Record parser.add_argument("--record" action='store_true' help='Record a movie (default is "movie.mp4")')<line_sep># Interactive mode parser.add_argument("--interactive" "-i" action='store_true' help="Interactive mode")<line_sep># Framerate option parser.add_argument("--framerate" "-f" default=60 type=int help="Framerate in frames/second")<line_sep># Display framerate option parser.add_argument("--display-fps" action='store_true' help="Display framerate in the console")<line_sep># Debug option parser.add_argument("--debug" "-d" action='store_true' help="Verbose debug mode")<line_sep># Window size parser.add_argument("--size" "-s" default="" type=str help="Window size")<line_sep># Window position parser.add_argument("--position" "-p" default="" type=str help="Window position")<line_sep># Single buffer parser.add_argument("--single-buffer" action='store_true' help="Single buffer mode")<line_sep># Stereo mode parser.add_argument("--stereo" action='store_true' help="Stereo mode")<line_sep># vertical synchronization parser.add_argument("--vsync" default=<false> type=bool help="Enable/disable vertical synchronization")<line_sep># sRGB mode parser.add_argument("--srgb" action='store_true' help="sRGB mode (gamma correction)")<line_sep># Depth buffer size parser.add_argument("--depth-size" default=16 type=int help="Depth buffer size")<line_sep># Stencil buffer size parser.add_argument("--stencil-size" default=0 type=int help="Stencil buffer size")<line_sep># GL API parser.add_argument("--gl-api" default="GL" choices=["GL" "ES"] help="GL API")<line_sep># GL profile parser.add_argument("--gl-profile" default="none" choices=["none" "core" "compatibility"] help="GL context profile (only relevant for GL > 3.0)")<line_sep># GL version parser.add_argument("--gl-version" default="2.1" help="GL version")<block_end>
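# Minimal usage sketch for the parser module above; it is not part of the original file, the import path glumpy.app.parser is a guess based on the package name, and get_options() itself is defined above. <import_from_stmt>glumpy.app parser<line_sep>options=parser.get_options()<line_sep>print(options.backend options.framerate options.debug)<line_sep>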
<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>math<line_sep>LEARNING_RATE=0.0001<line_sep>BATCH_SIZE=64<line_sep>TAU=0.001<class_stmt>ActorNet<block_start>""" Actor Network Model of DDPG Algorithm """<def_stmt>__init__ self num_states num_actions<block_start>self.g=tf.Graph()<with_stmt>self.g.as_default()<block_start>self.sess=tf.InteractiveSession()<line_sep># actor network model parameters: self.W1_a,self.B1_a,self.W2_a,self.B2_a,self.W3_a,self.B3_a,self.actor_state_in,self.actor_model=self.create_actor_net(num_states num_actions)<line_sep># target actor network model parameters: self.t_W1_a,self.t_B1_a,self.t_W2_a,self.t_B2_a,self.t_W3_a,self.t_B3_a,self.t_actor_state_in,self.t_actor_model=self.create_actor_net(num_states num_actions)<line_sep># cost of actor network: # gets input from action_gradient computed in critic network file self.q_gradient_input=tf.placeholder("float" [<none> num_actions])<line_sep>self.actor_parameters=[self.W1_a self.B1_a self.W2_a self.B2_a self.W3_a self.B3_a]<line_sep>self.parameters_gradients=tf.gradients(self.actor_model self.actor_parameters -self.q_gradient_input)<line_sep># /BATCH_SIZE) self.optimizer=tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(zip(self.parameters_gradients self.actor_parameters))<line_sep># initialize all tensor variable parameters: self.sess.run(tf.global_variables_initializer())<line_sep>self.update_target_actor_op=[self.t_W1_a.assign(TAU<times>self.W1_a+(1-TAU)<times>self.t_W1_a) self.t_B1_a.assign(TAU<times>self.B1_a+(1-TAU)<times>self.t_B1_a) self.t_W2_a.assign(TAU<times>self.W2_a+(1-TAU)<times>self.t_W2_a) self.t_B2_a.assign(TAU<times>self.B2_a+(1-TAU)<times>self.t_B2_a) self.t_W3_a.assign(TAU<times>self.W3_a+(1-TAU)<times>self.t_W3_a) self.t_B3_a.assign(TAU<times>self.B3_a+(1-TAU)<times>self.t_B3_a)]<line_sep># To make sure actor and target have the same initial parameters, copy the parameters: # copy target parameters self.sess.run([self.t_W1_a.assign(self.W1_a) self.t_B1_a.assign(self.B1_a) self.t_W2_a.assign(self.W2_a) self.t_B2_a.assign(self.B2_a) self.t_W3_a.assign(self.W3_a) self.t_B3_a.assign(self.B3_a)])<block_end><block_end><def_stmt>create_actor_net self num_states=4 num_actions=1<block_start>""" Network that takes states and returns actions """<line_sep>N_HIDDEN_1=400<line_sep>N_HIDDEN_2=300<line_sep>actor_state_in=tf.placeholder("float" [<none> num_states])<line_sep>W1_a=tf.Variable(tf.random_uniform([num_states N_HIDDEN_1] -1/math.sqrt(num_states) 1/math.sqrt(num_states)))<line_sep>B1_a=tf.Variable(tf.random_uniform([N_HIDDEN_1] -1/math.sqrt(num_states) 1/math.sqrt(num_states)))<line_sep>W2_a=tf.Variable(tf.random_uniform([N_HIDDEN_1 N_HIDDEN_2] -1/math.sqrt(N_HIDDEN_1) 1/math.sqrt(N_HIDDEN_1)))<line_sep>B2_a=tf.Variable(tf.random_uniform([N_HIDDEN_2] -1/math.sqrt(N_HIDDEN_1) 1/math.sqrt(N_HIDDEN_1)))<line_sep>W3_a=tf.Variable(tf.random_uniform([N_HIDDEN_2 num_actions] -0.003 0.003))<line_sep>B3_a=tf.Variable(tf.random_uniform([num_actions] -0.003 0.003))<line_sep>H1_a=tf.nn.softplus(tf.matmul(actor_state_in W1_a)+B1_a)<line_sep>H2_a=tf.nn.tanh(tf.matmul(H1_a W2_a)+B2_a)<line_sep>actor_model=tf.matmul(H2_a W3_a)+B3_a<line_sep><return>W1_a B1_a W2_a B2_a W3_a B3_a actor_state_in actor_model<block_end><def_stmt>evaluate_actor self state_t<block_start><return>self.sess.run(self.actor_model feed_dict={self.actor_state_in:state_t})<block_end><def_stmt>evaluate_target_actor self state_t_1<block_start><return>self.sess.run(self.t_actor_model
feed_dict={self.t_actor_state_in:state_t_1})<block_end><def_stmt>train_actor self actor_state_in q_gradient_input<block_start>self.sess.run(self.optimizer feed_dict={self.actor_state_in:actor_state_in self.q_gradient_input:q_gradient_input})<block_end><def_stmt>update_target_actor self<block_start>self.sess.run(self.update_target_actor_op)<block_end><block_end>
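# Minimal usage sketch for the DDPG actor above; it is not part of the original file, the module name "actor_net" is only a guess, and the class interface comes from the code (which requires TensorFlow 1.x). <import_stmt>numpy<as>np<import_from_stmt>actor_net ActorNet<line_sep>actor=ActorNet(num_states=4 num_actions=1)<line_sep>states=np.random.rand(64 4)<line_sep># forward passes through the online and the target network actions=actor.evaluate_actor(states)<line_sep>target_actions=actor.evaluate_target_actor(states)<line_sep># one policy-gradient step, here with random placeholder values for the dQ/da gradients normally supplied by the critic actor.train_actor(states np.random.rand(64 1))<line_sep>actor.update_target_actor()<line_sep>print(actions.shape target_actions.shape)<line_sep>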
buildcode=""" function Get-ExternalIP(){ $extern_ip_mask = @() while ($response.IPAddress -eq $null){ $response = Resolve-DnsName -Name myip.opendns.com -Server resolver1.opendns.com Start-Sleep -s 1 } $octet1, $octet2, $octet3, $octet4 = $response.IPAddress.Split(".") $extern_ip_mask += $response.IPAddress $extern_ip_mask += [string]$octet1 + "." + [string]$octet2 + "." + [string]$octet3 + ".0" $extern_ip_mask += [string]$octet1 + "." + [string]$octet2 + ".0.0" $extern_ip_mask += [string]$octet1 + ".0.0.0" return $extern_ip_mask } """<line_sep>callcode=""" $key_combos += ,(Get-ExternalIP) """<line_sep>
<import_stmt>pathlib<import_from_stmt>transformers4rec.data.dataset ParquetDataset<line_sep>tabular_testing_data:ParquetDataset=ParquetDataset(pathlib.Path(__file__).parent)<line_sep>
# coding=utf-8 # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- <import_from_stmt>knack.help_files helps# pylint: disable=unused-import helps['ai-examples']=""" type: group short-summary: Add AI powered examples to help content. """<line_sep>helps['ai-examples check-connection']=""" type: command short-summary: Check if the client can connect to the AI example service. """<line_sep>
""" Manifold similarity measures for any collection of sequences of vectors. Can be useful for improved interpretability of neural nets. """<import_from_stmt>.reduction DimensionalityReducer<import_from_stmt>tqdm tqdm<import_from_stmt>scipy.spatial procrustes<import_from_stmt>hover.core Loggable<line_sep>DEFAULT_UMAP_PARAMS={"n_components":2 "n_neighbors":30 "min_dist":0.1 "metric":"euclidean" "random_state":0 "transform_seed":0 }<class_stmt>LayerwiseManifold(Loggable)<block_start>""" Takes a sequence of arrays (each row of the array is a vector) and does the following: (1) unfold vectors into lower dimensions, typically 2D or 3D; (2) for every array: run Procrustes analysis for fitting to the previous array. The first array is fitted to itself. """<def_stmt>__init__ self seq_arr<block_start>""" :param seq_arr: sequence of arrays to fit the manifold with. :type seq_arr: list of numpy.ndarrays. """<line_sep>self.arrays=seq_arr[:]<line_sep>self.validate()<line_sep>self.standardize()<block_end><def_stmt>validate self<block_start>""" Sanity check of array dimensions. """<assert_stmt>(len(self.arrays)<g>1) "Need at least two arrays to compute layerwise manifold."<line_sep>self.n_vecs=self.arrays[0].shape[0]<for_stmt>_arr self.arrays<block_start><assert_stmt>_arr.shape[0]<eq>self.n_vecs<block_end>self._good("Validated dimensions of input arrays")<block_end><def_stmt>standardize self<block_start>""" Standardize each array to the Procrustes form where - tr(A^T A) = 1 - A.mean(axis=0) = 0 """<def_stmt>transform arr<block_start>matrix,_,_=procrustes(arr arr)<line_sep><return>matrix<block_end>self.arrays=[transform(_arr)<for>_arr self.arrays]<line_sep>self._good("Standardized input arrays")<block_end><def_stmt>unfold self method="umap" reducer_kwargs=<none><block_start>""" Compute lower-dimensional manifolds using UMAP. :param method: the dimensionality reduction method to use. :type method: str """<assert_stmt>method<in>{"umap" "ivis"}<if_stmt>method<eq>"umap"<block_start>reducer_kwargs=reducer_kwargs<or>DEFAULT_UMAP_PARAMS<block_end><else_stmt><block_start>reducer_kwargs=reducer_kwargs<or>dict()<block_end>self.manifolds=[]<line_sep>self._info(f"Running {method}...")<for_stmt>_arr tqdm(self.arrays total=len(self.arrays))<block_start>_reducer=DimensionalityReducer(_arr)<line_sep>_manifold=_reducer.fit_transform(method **reducer_kwargs)<line_sep>self.manifolds.append(_manifold)<block_end>self._good("Successfully unfolded arrays into manifolds")<block_end><def_stmt>procrustes self arrays=<none><block_start>""" Run Procrustes analysis, optionally on a specified list of arrays. """<if_stmt>arrays<is><none><block_start>arrays=self.manifolds<block_end>disparities=[]<line_sep>fit_arrays=[]<line_sep># fit each array to its fitted predecessor <for_stmt>i,_arr enumerate(arrays)<block_start><if_stmt>i<eq>0# fit the first array to itself <block_start>_,_matrix,_disparity=procrustes(_arr _arr)<block_end><else_stmt><block_start>_,_matrix,_disparity=procrustes(fit_arrays[i-1] _arr)<block_end>disparities.append(_disparity)<line_sep>fit_arrays.append(_matrix)<block_end>self._good("Successfully carried out Procrustes analysis")<line_sep><return>fit_arrays disparities<block_end><block_end>
<import_from_stmt>abc ABC<import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>Operators.DummyAlgorithmWithModel DummyAlgorithmWithModel<import_from_stmt>Utils.GeometryUtils center_pad_image_with_specific_base resize_with_long_side force_convert_image_to_bgr correct_face_orientation<import_from_stmt>Utils.InferenceHelpers TritonInferenceHelper<class_stmt>FaceParsingOperator(DummyAlgorithmWithModel ABC)<block_start>name='FaceParsing'<line_sep>__version__='v1.0.20210323'<def_stmt>__init__ self _inference_config _is_test<block_start>super().__init__(_inference_config _is_test)<block_end><block_end><class_stmt>GeneralFaceParsing(FaceParsingOperator)<block_start>""" Get face parsing regions; outside the facial area accuracy is quite low, e.g. for earrings and glasses """<line_sep>name='BiSeNet-based semantic segmentation of faces in natural scenes'<line_sep>__version__='v1.0.20210323'<def_stmt>__init__ self _inference_config _is_test<block_start>""" Meaning of each index: 0 background 1 skin 2 right eyebrow 3 left eyebrow 4 right eye 5 left eye 6 glasses 7 right ear 8 left ear 9 earring 10 nose 11 mouth cavity 12 upper lip 13 lower lip 14 neck 15 16 clothes 17 hair 18 hat """<line_sep>super().__init__(_inference_config _is_test)<line_sep># The model has no hard size limit, but for efficiency all inputs are unified to 512 self.candidate_image_size=(512 512)<block_end><def_stmt>get_inference_helper self<block_start><if_stmt>self.inference_config['name']<eq>'triton'<block_start>inference_helper=TritonInferenceHelper('FaceParsing' self.inference_config['triton_url'] self.inference_config['triton_port'] 'FaceParsing' 1)<line_sep>inference_helper.add_image_input('INPUT__0' (512 512 3) 'rgb image used for detection' ([103.53 116.28 123.675] [57.375 57.12 58.395]))<line_sep>inference_helper.add_output('OUTPUT__0' (19 512 512) 'region map for each class')<line_sep>self.inference_helper=inference_helper<block_end><else_stmt><block_start><raise>NotImplementedError(f"{self.inference_config['name']} helper for face parsing not implemented")<block_end><block_end><def_stmt>execute self _image _landmark_info=<none><block_start>to_return_result={'semantic_segmentation':np.zeros((_image.shape[1] _image.shape[0]) dtype=np.uint8) }<if_stmt>_landmark_info<is><not><none><block_start>corrected_face_image,rotate_back_function=correct_face_orientation(_image _landmark_info)<block_end><else_stmt><block_start>corrected_face_image=_image<def_stmt>_rotate_back_function _image<block_start><return>_image<block_end>rotate_back_function=_rotate_back_function<block_end>original_h,original_w=corrected_face_image.shape[:2]<line_sep>resized_image=resize_with_long_side(corrected_face_image 512)<line_sep>resized_h,resized_w=resized_image.shape[:2]<line_sep>padded_image,(width_pad_ratio height_pad_ratio)=center_pad_image_with_specific_base(resized_image _width_base=512 _height_base=512 _output_pad_ratio=<true>)<line_sep>candidate_image=cv2.cvtColor(force_convert_image_to_bgr(padded_image) cv2.COLOR_BGR2RGB)<line_sep>candidate_h,candidate_w=candidate_image.shape[:2]<if_stmt>isinstance(self.inference_helper TritonInferenceHelper)<block_start>result=self.inference_helper.infer(_need_tensor_check=<false> INPUT__0=candidate_image.astype(np.float32))<line_sep>semantic_index=result['OUTPUT__0'].squeeze()<block_end><else_stmt><block_start><raise>NotImplementedError(f"{self.inference_helper.type_name} helper for face parsing not implemented")<block_end>left_width_pad=int(width_pad_ratio<times>candidate_w)<line_sep>top_height_pad=int(height_pad_ratio<times>candidate_h)<line_sep># remove the padding semantic_index_without_pad=semantic_index[top_height_pad:top_height_pad+resized_h left_width_pad:left_width_pad+resized_w]<line_sep># undo the resize resize_back_semantic_index=cv2.resize(semantic_index_without_pad (original_w original_h)
interpolation=cv2.INTER_NEAREST)<line_sep># restore the original image orientation original_orientation_semantic_index=rotate_back_function(resize_back_semantic_index)<line_sep>to_return_result['semantic_segmentation']=original_orientation_semantic_index<line_sep><return>to_return_result<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>argparse ArgumentParser<import_from_stmt>Utils.AnnotationTools annotate_segmentation<import_from_stmt>Operators.ExampleFaceDetectOperator GeneralUltraLightFaceDetect<import_from_stmt>Operators.ExampleFaceAlignmentOperator GeneralLandmark106p<import_from_stmt>Utils.GeometryUtils get_rotated_box_roi_from_image<line_sep>ag=ArgumentParser('Face Parsing Example')<line_sep>ag.add_argument('-i' '--image_path' dest='image_path' type=str required=<true> help='local image path')<line_sep>ag.add_argument('-u' '--triton_url' dest='triton_url' type=str required=<true> help='triton url')<line_sep>ag.add_argument('-p' '--triton_port' dest='triton_port' type=int default=8001 help='triton grpc port')<line_sep>args=ag.parse_args()<line_sep># assume there is only one face in the image img=cv2.imread(args.image_path)<line_sep>face_parsing_handler=GeneralFaceParsing({'name':'triton' 'triton_url':args.triton_url 'triton_port':args.triton_port} <true>)<line_sep>ultra_light_face_detect_handler=GeneralUltraLightFaceDetect({'name':'triton' 'triton_url':args.triton_url 'triton_port':args.triton_port} <true> 0.7 0.5)<line_sep>landmark106p_detect_handler=GeneralLandmark106p({'name':'triton' 'triton_url':args.triton_url 'triton_port':args.triton_port} <true>)<line_sep>face_bbox=ultra_light_face_detect_handler.execute(img)['locations'][0]['box_info']<line_sep>cropped_image=get_rotated_box_roi_from_image(img face_bbox 1.35)<line_sep>landmark_info=landmark106p_detect_handler.execute(cropped_image)<line_sep>landmark106p_with_bbox_result_image=cropped_image.copy()<line_sep>landmark106p_with_bbox_result_all_points=[(x y)<for>x,y zip(landmark_info['x_locations'] landmark_info['y_locations'])]<line_sep>face_parsing_with_bbox_result=face_parsing_handler.execute(cropped_image landmark_info)<line_sep>face_parsing_with_bbox_result_image=cropped_image.copy()<line_sep>face_parsing_with_bbox_result_image=annotate_segmentation(face_parsing_with_bbox_result_image face_parsing_with_bbox_result['semantic_segmentation'])<line_sep>cv2.imshow(f'face_parsing_with_bbox_result_image' face_parsing_with_bbox_result_image)<line_sep>cv2.waitKey(0)<line_sep>cv2.destroyAllWindows()<block_end>
# Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Parser for Pack2 definition files."""<import_stmt>re<import_from_stmt>makani.lib.python.pack2 metadata<import_from_stmt>ply lex<import_from_stmt>ply yacc<line_sep># PLY's token and parser rule naming conflicts with our style. # pylint: disable=invalid-name <class_stmt>Formatter(object)<block_start>"""Pack2 code formatter. This class is fed tokens from the lexer and produces a canonically formatted version of the code. """<line_sep># Maps each block type to its line terminating character. _new_line_char_map={'bitfield8':',' 'bitfield16':',' 'bitfield32':',' 'enum8':',' 'enum16':',' 'enum32':',' 'header':';' 'param':';' 'scaled8':',' 'scaled16':',' 'scaled32':',' 'specialize':';' 'struct':';' }<line_sep># Tokens that eat whitespace after them. _whitespace_eaters=set(['[' '(' ])<def_stmt>__init__ self<block_start>self.preamble=''<line_sep>self.formatted=''<line_sep>self.first_line=<true><line_sep>self.new_line=''<line_sep>self.block_level=0<line_sep>self.indent='  '<line_sep>self.new_line_char=<none><line_sep>self.eat_whitespace=<false><line_sep>self.extra_new_line=<false><line_sep>self.prev_token=<none><line_sep>self.eat_new_line=<false><line_sep>self.ignore_line=<false><block_end><def_stmt>_NextNewLine self token<block_start>"""Return the nominal line break for the next statement."""<if_stmt>self.block_level<eq>0<and>token<eq>';'<block_start>self.eat_new_line=<true><line_sep><return>'\n\n'<block_end><if_stmt>token<eq>'}'<block_start>self.eat_new_line=<true><line_sep><return>'\n\n'<block_end><elif_stmt>token<eq>'{'<block_start>self.eat_new_line=<true><line_sep><return>'\n'<block_end><elif_stmt>token<eq>self.new_line_char<block_start><return>'\n'<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>_ExtraNewLineAllowed self token<block_start>"""Determine if an extra new line is allowed. Args: token: The token currently being added. Returns: True if an extra new line is allowed before the current statement. """<if_stmt>self.block_level<l>1<block_start><return><false><block_end><if_stmt>token<eq>'}'<block_start><return><false><block_end><if_stmt>self.eat_new_line<block_start><return><false><block_end><return><true><block_end><def_stmt>_BlockIndent self<block_start>indent=''<for_stmt>_ range(0 self.block_level)<block_start>indent<augadd>self.indent<block_end><return>indent<block_end><def_stmt>_NewLine self token<block_start>"""Calculate line break. Calculates the appropriate line break sequence to precede the current token being added. Args: token: The token currently being added. Returns: A string containing the appropriate line break sequence. """<if_stmt>self.extra_new_line<and>self._ExtraNewLineAllowed(token)# Single blank lines are allowed within blocks to allow for logical # grouping of fields/values.
<block_start>new_line='\n\n'<block_end><else_stmt><block_start>new_line=self.new_line<block_end>self.extra_new_line=<false><line_sep>self.eat_new_line=<false><line_sep><return>new_line+self._BlockIndent()<block_end><def_stmt>AddToken self token whitespace<block_start>"""Add a token to the formatter. Args: token: The token to add. whitespace: The nominal whitespace to add before the token. """<line_sep># Ignore include lines. These will be prepended later in alphabetical # order. <if_stmt><not>self.prev_token<or>self.prev_token<eq>'\n'<block_start><if_stmt>token<eq>'include'<block_start>self.ignore_line=<true><block_end><block_end><if_stmt>self.ignore_line<block_start><return><block_end><if_stmt>self.new_line<block_start>self.formatted<augadd>self._NewLine(token)<block_end><elif_stmt><not>self.first_line<and><not>self.eat_whitespace<block_start>self.formatted<augadd>whitespace<block_end>self.formatted<augadd>str(token)<line_sep>self.new_line=self._NextNewLine(token)<line_sep>self.first_line=<false><line_sep>self.eat_whitespace=token<in>self._whitespace_eaters<if_stmt>token<in>self._new_line_char_map<block_start>self.new_line_char=self._new_line_char_map[token]<block_end>self.prev_token=token<block_end><def_stmt>AddComment self comment<block_start>"""Add comment to the formatter."""<line_sep># Special case comments at the top of files as they are allowed to come # before include directive. <if_stmt>self.first_line<block_start>self.preamble<augadd>'// '+str(comment)+'\n'<line_sep><return><block_end><if_stmt>self.prev_token<eq>'\n'<or>self.first_line# A comment following a new line should be on it's own line. <block_start>self.formatted<augadd>self._NewLine('')<block_end><else_stmt># Otherwise it should be exactly two spaces after the end of line. <block_start>self.formatted<augadd>' '<block_end>self.formatted<augadd>'// '+str(comment)<line_sep>self.new_line='\n'<block_end><def_stmt>ProcessNewLine self count<block_start>self.prev_token='\n'<line_sep>self.extra_new_line=count<g>1<line_sep>self.ignore_line=<false><block_end><def_stmt>EnterBlock self<block_start>self.block_level<augadd>1<block_end><def_stmt>ExitBlock self<block_start>self.block_level<augsub>1<block_end><block_end><class_stmt>ParseError(Exception)<block_start><def_stmt>__init__ self value errors<block_start>super(self.__class__ self).__init__(value)<line_sep>self.errors=errors<line_sep>self.value=value<block_end><def_stmt>__str__ self<block_start>string=self.value+'\n'<for_stmt>e self.errors<block_start>string<augadd>e<block_end><return>string<block_end><block_end><class_stmt>Lexer(object)<block_start>"""Lexer for Pack2 definition files."""<def_stmt>__init__ self error_func<block_start>self.error_func=error_func<line_sep>self.formatter=Formatter()<block_end><def_stmt>Build self **kwargs# Building the lexer is separate from __init__() because the PLY # docs warn against calling lex() from __init__ <block_start>self.lexer=lex.lex(object=self **kwargs)<block_end>keywords=['BITFIELD8' 'BITFIELD16' 'BITFIELD32' 'ENUM8' 'ENUM16' 'ENUM32' 'HEADER' 'INCLUDE' 'PARAM' 'SCALED8' 'SCALED16' 'SCALED32' 'SPECIALIZE' 'STRING' 'STRUCT' ]<line_sep>keyword_map={keyword.lower():keyword<for>keyword keywords}<line_sep>tokens=keywords+['ID' 'COLON' 'COMMA' 'EQUAL' 'SEMICOLON' 'LCURLY' 'RCURLY' 'LPAREN' 'RPAREN' 'LSQUARE' 'RSQUARE' 'FLOAT_LITERAL' 'HEX_LITERAL' 'BIN_LITERAL' 'NEG_DEC_LITERAL' 'DEC_LITERAL' 'STRING_LITERAL' ]<line_sep># Ignored characters t_ignore=' \t'<line_sep># Tokens # # PLY makes use of docstrings in token functions to specify the 
token regex. # Furthermore it uses raw strings because, according the manual, "they are the # most convenient way to write regular expression strings." # # pylint: disable=g-docstring-quotes,g-short-docstring-punctuation # The order of the functions here reflects the order in which the # lexer matches tokens. <def_stmt>t_FLOAT_LITERAL self t<block_start>r'[-+]?[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?'<line_sep>self.formatter.AddToken(t.value ' ')<line_sep>t.value=float(t.value)<line_sep><return>t<block_end><def_stmt>t_HEX_LITERAL self t<block_start>r'0x[0-9A-Fa-f]+'<line_sep>self.formatter.AddToken(t.value ' ')<line_sep>t.value=int(t.value[2:] 16)<line_sep><return>t<block_end><def_stmt>t_BIN_LITERAL self t<block_start>r'0b[01]+'<line_sep>self.formatter.AddToken(t.value ' ')<line_sep>t.value=int(t.value[2:] 2)<line_sep><return>t<block_end><def_stmt>t_NEG_DEC_LITERAL self t<block_start>r'-(0|[1-9][0-9]*)'<line_sep>self.formatter.AddToken(t.value ' ')<line_sep>t.value=int(t.value 10)<line_sep><return>t<block_end><def_stmt>t_DEC_LITERAL self t<block_start>r'\+?0|[1-9][0-9]*'<line_sep>self.formatter.AddToken(t.value ' ')<line_sep>t.value=int(t.value 10)<line_sep><return>t<block_end><def_stmt>t_STRING_LITERAL self t<block_start>r'"[^"]*"'<line_sep>self.formatter.AddToken(t.value ' ')<line_sep>t.value=t.value[1:-1]# Remove quotes. <return>t<block_end><def_stmt>t_ID self t<block_start>r'[a-zA-Z_][a-zA-Z0-9_]*'<line_sep>self.formatter.AddToken(t.value ' ')<line_sep>t.type=self.keyword_map.get(t.value 'ID')<line_sep><return>t<block_end><def_stmt>t_comment self t<block_start>r'//[ \t]*(?P<comment_text>.*)'<line_sep>self.formatter.AddComment(t.lexer.lexmatch.group('comment_text'))<block_end><def_stmt>t_newline self t<block_start>r'\n+'<line_sep>self.formatter.ProcessNewLine(t.value.count('\n'))<line_sep>t.lexer.lineno<augadd>t.value.count('\n')<block_end><def_stmt>t_COLON self t<block_start>r':'# pylint: disable=invalid-name self.formatter.AddToken(t.value '')<line_sep><return>t<block_end><def_stmt>t_COMMA self t<block_start>r','# pylint: disable=invalid-name self.formatter.AddToken(t.value '')<line_sep><return>t<block_end><def_stmt>t_EQUAL self t<block_start>r'='# pylint: disable=invalid-name self.formatter.AddToken(t.value ' ')<line_sep><return>t<block_end><def_stmt>t_SEMICOLON self t<block_start>r';'# pylint: disable=invalid-name self.formatter.AddToken(t.value '')<line_sep><return>t<block_end><def_stmt>t_LCURLY self t<block_start>r'\{'# pylint: disable=invalid-name self.formatter.AddToken(t.value ' ')<line_sep>self.formatter.EnterBlock()<line_sep><return>t<block_end><def_stmt>t_RCURLY self t<block_start>r'\}'# pylint: disable=invalid-name self.formatter.ExitBlock()<line_sep>self.formatter.AddToken(t.value '')<line_sep><return>t<block_end><def_stmt>t_LPAREN self t<block_start>r'\('# pylint: disable=invalid-name self.formatter.AddToken(t.value '')<line_sep>self.formatter.EnterBlock()<line_sep><return>t<block_end><def_stmt>t_RPAREN self t<block_start>r'\)'# pylint: disable=invalid-name self.formatter.ExitBlock()<line_sep>self.formatter.AddToken(t.value '')<line_sep><return>t<block_end><def_stmt>t_LSQUARE self t<block_start>r'\['# pylint: disable=invalid-name self.formatter.AddToken(t.value '')<line_sep><return>t<block_end><def_stmt>t_RSQUARE self t<block_start>r'\]'# pylint: disable=invalid-name self.formatter.AddToken(t.value '')<line_sep><return>t<block_end><def_stmt>t_error self t<block_start>self.error_func('%d: Illegal character \'%s\''%(t.lineno t.value[0]))<line_sep>t.lexer.skip(1)<block_end># pylint: 
enable=g-docstring-quotes,g-short-docstring-punctuation <block_end><class_stmt>_DefaultFileLoader(object)<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>ReadFile self file_name<block_start><with_stmt>open(file_name 'r')<as>f<block_start>contents=f.read()<block_end><return>contents<block_end><block_end><class_stmt>Parser(object)<block_start>"""Parser for Pack2 definition files."""<def_stmt>__init__ self file_loader=<none> loaded_files=<none><block_start>self.lexer=Lexer(error_func=self._RecordError)<line_sep>self.lexer.Build()<line_sep>self.tokens=self.lexer.tokens<if_stmt>loaded_files<block_start>self.loaded_files=loaded_files<block_end><else_stmt><block_start>self.loaded_files=set()<block_end><if_stmt>file_loader<is><none><block_start>self.file_loader=_DefaultFileLoader()<block_end><else_stmt><block_start>self.file_loader=file_loader<block_end>self.include_re=re.compile(r'(.*)\.p2$')<line_sep># TODO: Investigate generating tables at build time and # packaging them with the library. self.parser=yacc.yacc(module=self debug=<false> write_tables=<false>)<block_end><def_stmt>Parse self string<block_start>"""Parse a Pack2 definition string."""<line_sep>self.valid=<true><line_sep>self.metadata=metadata.Metadata()<line_sep>self.errors=[]<try_stmt><block_start>self.parser.parse(string tracking=<true>)<block_end><except_stmt>IndexError<as>e# Due to a bug in PLY, an index error is caused if we raise a syntax # error. If we've previously raised a syntax error, ignore it so that # we can raise a ParseError instead. <block_start><if_stmt>self.valid<block_start><raise>e<block_end><block_end><if_stmt><not>self.valid<block_start><raise>ParseError('Parse Error' self.errors)<block_end><return>self.metadata<block_end><def_stmt>GetFormattedSource self<block_start>preamble=self.lexer.formatter.preamble<if_stmt>self.metadata.includes<block_start><if_stmt>preamble<block_start>preamble<augadd>'\n'<block_end><for_stmt>inc sorted(self.metadata.includes)<block_start>preamble<augadd>('include "%s.p2";\n'%inc)<block_end>preamble<augadd>'\n'<block_end><return>preamble+self.lexer.formatter.formatted+'\n'<block_end><def_stmt>_RecordError self string<block_start>self.valid=<false><line_sep>self.errors.append(string)<block_end><def_stmt>_RaiseError self string<block_start>self._RecordError(string)<line_sep><raise>SyntaxError(string)<block_end><def_stmt>HandleWidthType self base_name p<block_start>"""Common handing for types that have 8, 16, and 32 bit widths. Grammar for type should be of the follow: type_def : type_keyword ID LCURLY type_body RCURLY Args: base_name: The type's base name (eg. 'enum', 'bitfield', or 'scaled'.) p: The PLY parser arguments from the production rule. Returns: A dict containing 'type', 'name', 'body', and 'width'. 
"""<line_sep>info={'type':p[1] 'name':p[2] 'body':p[4] }<if_stmt>info['type']<eq>base_name+'8'<block_start>info['width']=1<block_end><elif_stmt>info['type']<eq>base_name+'16'<block_start>info['width']=2<block_end><elif_stmt>info['type']<eq>base_name+'32'<block_start>info['width']=4<block_end><else_stmt><block_start>self._RaiseError('%d: invalid %s type %s.\n'%(p.lineno(1) base_name info['type']))<block_end><return>info<block_end><def_stmt>ResolveType self type_name lineno=-1<block_start><if_stmt>type_name<not><in>self.metadata.type_map<block_start>self._RaiseError('%d: Type \'%s\' unknown.\n'%(lineno type_name))<line_sep><raise>SyntaxError<block_end><return>self.metadata.type_map[type_name]<block_end># PLY makes use of docstrings in production function to specify the grammar. # These do not conform to the google style for doc strings. # # pylint: disable=g-short-docstring-punctuation # pylint: disable=g-doc-args # pylint: disable=g-no-space-after-docstring-summary <def_stmt>p_file self p<block_start>"""file : bitfield_def file | enum_def file | header_def file | include_def file | param_def file | scaled_def file | specialize_def file | struct_def file | """<block_end><def_stmt>p_include_def self p<block_start>"""include_def : INCLUDE STRING_LITERAL SEMICOLON"""<line_sep>file_name=p[2]<line_sep>match=self.include_re.match(file_name)<if_stmt><not>match<block_start>self._RaiseError('%d: %s is not named like a p2 file.'%(p.lineno(2) file_name))<block_end>path=match.group(1)<if_stmt>file_name<in>self.loaded_files<block_start><return><block_end>self.loaded_files.add(file_name)<line_sep>contents=self.file_loader.ReadFile(file_name)<line_sep>parser=Parser(file_loader=self.file_loader loaded_files=self.loaded_files)<line_sep>meta=parser.Parse(contents)<try_stmt><block_start>self.metadata.AddInclude(path meta)<block_end><except_stmt>ValueError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><block_end><def_stmt>p_struct_def self p<block_start>"""struct_def : STRUCT ID LCURLY struct_body RCURLY"""<line_sep>name=p[2]<line_sep>body=p[4]<try_stmt><block_start>self.metadata.AddType(metadata.StructType(name body))<block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><block_end><def_stmt>p_enum_def self p<block_start>"""enum_def : enum_keyword ID LCURLY enum_body RCURLY"""<try_stmt><block_start>info=self.HandleWidthType('enum' p)<line_sep>enum=metadata.EnumType(info['name'] info['width'] info['body'])<line_sep>self.metadata.AddType(enum)<block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><except_stmt>ValueError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><block_end><def_stmt>p_enum8_keyword self p<block_start>"""enum_keyword : ENUM8 | ENUM16 | ENUM32 """<line_sep>p[0]=p[1]<block_end><def_stmt>p_bitfield_def self p<block_start>"""bitfield_def : bitfield_keyword ID LCURLY bitfield_body RCURLY"""<try_stmt><block_start>info=self.HandleWidthType('bitfield' p)<line_sep>bitfield=metadata.BitfieldType(info['name'] info['width'] info['body'])<line_sep>self.metadata.AddType(bitfield)<block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><except_stmt>ValueError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><block_end><def_stmt>p_bitfield8_keyword self p<block_start>"""bitfield_keyword 
: BITFIELD8 | BITFIELD16 | BITFIELD32 """<line_sep>p[0]=p[1]<block_end><def_stmt>p_scaled_def self p<block_start>"""scaled_def : scaled_keyword ID LCURLY scaled_body RCURLY"""<try_stmt><block_start>info=self.HandleWidthType('scaled' p)<if_stmt>'scale'<not><in>info['body']<block_start>self._RaiseError('%d: Scaled type %s does not contain scale property.'%(p.lineno(2) info['name']))<block_end><if_stmt>'offset'<not><in>info['body']<block_start>self._RaiseError('%d: Scaled type %s does not contain offset property.'%(p.lineno(2) info['name']))<block_end>scale=info['body']['scale']<line_sep>offset=info['body']['offset']<line_sep>scaled=metadata.ScaledType(info['name'] info['width'] offset=offset scale=scale)<line_sep>self.metadata.AddType(scaled)<block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><except_stmt>ValueError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><block_end><def_stmt>p_scaled8_keyword self p<block_start>"""scaled_keyword : SCALED8 | SCALED16 | SCALED32 """<line_sep>p[0]=p[1]<block_end><def_stmt>p_param_def self p<block_start>"""param_def : PARAM ID LCURLY struct_body RCURLY"""<line_sep>name=p[2]<line_sep>body=p[4]<try_stmt><block_start>param=metadata.Param(name body)<line_sep>self.metadata.AddType(param)<block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><block_end><def_stmt>p_header_def self p<block_start>"""header_def : HEADER ID LCURLY struct_body RCURLY"""<line_sep>name=p[2]<line_sep>body=p[4]<try_stmt><block_start>header=metadata.Header(name body)<line_sep>self.metadata.AddType(header)<block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><block_end># pylint: disable=line-too-long <def_stmt>p_speclialize_def self p<block_start>"""specialize_def : SPECIALIZE LPAREN ID RPAREN ID LCURLY struct_body RCURLY"""<line_sep># pylint: enable=line-too-long parent_name=p[3]<line_sep>name=p[5]<line_sep>body=p[7]<if_stmt>parent_name<not><in>self.metadata.type_map<block_start>self._RaiseError('%d: Unknown parent type %s.\n'%(p.lineno(2) parent_name))<block_end>parent_type=self.metadata.type_map[parent_name]<try_stmt><block_start>new_type=parent_type.Specialize(name body)<line_sep>self.metadata.AddType(new_type)<block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><except_stmt>ValueError<as>e<block_start>self._RaiseError('%d: %s\n'%(p.lineno(2) e))<block_end><block_end><def_stmt>p_struct_body self p<block_start>"""struct_body : struct_body field_def | field_def """<try_stmt><block_start><if_stmt>len(p)<eq>2<block_start>line=p.lineno(1)<line_sep>body=metadata.StructBody()<line_sep>body.AddField(p[1])<block_end><elif_stmt>len(p)<eq>3<block_start>line=p.lineno(2)<line_sep>body=p[1]<line_sep>body.AddField(p[2])<block_end><block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(line e))<block_end>p[0]=body<block_end><def_stmt>p_field_def self p<block_start>"""field_def : ID ID SEMICOLON"""<line_sep>type_name=p[1]<line_sep>name=p[2]<line_sep>field_type=self.ResolveType(type_name p.lineno(1))<line_sep>p[0]=metadata.StructField(field_type name)<block_end><def_stmt>p_string_field_def self p<block_start>"""field_def : STRING LSQUARE unsigned_literal RSQUARE ID SEMICOLON"""<line_sep>length=p[3]<line_sep>name=p[5]<line_sep>type_obj=metadata.StringType(length)<line_sep>p[0]=metadata.StructField(type_obj 
name)<block_end><def_stmt>p_array_field_def self p<block_start>"""field_def : ID ID LSQUARE unsigned_literal RSQUARE SEMICOLON"""<line_sep>type_name=p[1]<line_sep>name=p[2]<line_sep>extent=p[4]<line_sep>field_type=self.ResolveType(type_name p.lineno(1))<line_sep>p[0]=metadata.StructField(field_type name extent)<block_end><def_stmt>p_enum_body self p<block_start>"""enum_body : enum_body enum_value | enum_value """<try_stmt><block_start><if_stmt>len(p)<eq>2<block_start>line=p.lineno(1)<line_sep>value=p[1]<line_sep>body=metadata.EnumBody()<block_end><elif_stmt>len(p)<eq>3<block_start>line=p.lineno(2)<line_sep>value=p[2]<line_sep>body=p[1]<block_end>body.AddValue(value[0] value[1])<block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(line e))<block_end>p[0]=body<block_end><def_stmt>p_enum_value self p<block_start>"""enum_value : ID EQUAL signed_literal COMMA"""<line_sep>p[0]=(p[1] p[3])<block_end><def_stmt>p_bitfield_body self p<block_start>"""bitfield_body : bitfield_body bitfield_value | bitfield_value """<try_stmt><block_start><if_stmt>len(p)<eq>2<block_start>line=p.lineno(1)<line_sep>value=p[1]<line_sep>body=metadata.BitfieldBody()<block_end><elif_stmt>len(p)<eq>3<block_start>line=p.lineno(2)<line_sep>value=p[2]<line_sep>body=p[1]<block_end>body.AddFlag(value[0] value[1])<block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(line e))<block_end>p[0]=body<block_end><def_stmt>p_scaled_body self p<block_start>"""scaled_body : scaled_body scaled_property | scaled_property """<try_stmt><block_start><if_stmt>len(p)<eq>2<block_start>line=p.lineno(1)<line_sep>value=p[1]<line_sep>body={}<block_end><elif_stmt>len(p)<eq>3<block_start>line=p.lineno(2)<line_sep>value=p[2]<line_sep>body=p[1]<block_end><if_stmt>value[0]<in>body<block_start>self._RaiseError('%d: Scaled property %s repeated.'%(line value[0]))<block_end>body[value[0]]=value[1]<block_end><except_stmt>SyntaxError<as>e<block_start>self._RaiseError('%d: %s\n'%(line e))<block_end>p[0]=body<block_end><def_stmt>p_scaled_property self p<block_start>"""scaled_property : ID EQUAL FLOAT_LITERAL COMMA | ID EQUAL signed_literal COMMA """<line_sep>name=p[1]<line_sep>value=p[3]<if_stmt>name<ne>'scale'<and>name<ne>'offset'<block_start>self._RaiseError('%d: Unknown scaled property %s.'%(p.lineno(1) name))<block_end>p[0]=(name value)<block_end><def_stmt>p_bitfield_value self p<block_start>"""bitfield_value : unsigned_literal COLON ID COMMA"""<line_sep>p[0]=(p[3] p[1])<block_end><def_stmt>p_unsigned_literal self p<block_start>"""unsigned_literal : HEX_LITERAL | DEC_LITERAL | BIN_LITERAL """<line_sep>p[0]=p[1]<block_end><def_stmt>p_signed_literal self p<block_start>"""signed_literal : unsigned_literal | NEG_DEC_LITERAL """<line_sep>p[0]=p[1]<block_end><def_stmt>p_error self p<block_start>self.valid=<false><line_sep>self.errors.append('%d: Syntax error at \'%s\'\n'%(p.lineno p.value))<block_end># pylint: enable=g-short-docstring-punctuation # pylint: enable=g-no-space-after-docstring-summary # pylint: enable=g-no-space-after-docstring-summary # pylint: enable=invalid-name <block_end>
DEFAULT_SERVER_PORT=5005<line_sep>BIG_FILE_SIZE_THRESHOLD=100<times>2<power>20# 100MB
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # python3 """A Control-Flow example which builds up a simple insertion experiment."""<import_from_stmt>typing Optional<import_from_stmt>absl app<import_stmt>dm_env<import_from_stmt>dm_robotics.agentflow core<import_from_stmt>dm_robotics.agentflow subtask<import_from_stmt>dm_robotics.agentflow.meta_options.control_flow cond<import_from_stmt>dm_robotics.agentflow.meta_options.control_flow loop_ops<import_from_stmt>dm_robotics.agentflow.meta_options.control_flow sequence<import_from_stmt>dm_robotics.agentflow.meta_options.control_flow.examples common<import_from_stmt>dm_robotics.agentflow.rendering graphviz_renderer<def_stmt>near_socket unused_timestep:dm_env.TimeStep unused_result:Optional[core.OptionResult]<arrow>bool<block_start><return><false><block_end><def_stmt>last_option_successful unused_timestep:dm_env.TimeStep result:core.OptionResult<block_start><return>result.termination_reason<eq>core.TerminationType.SUCCESS<block_end><def_stmt>build <block_start>"""Builds example graph."""<line_sep>env=common.DummyEnv()<line_sep># Define a subtask that exposes the desired RL-environment view on `base_task` my_subtask=common.DummySubTask(env.observation_spec() 'Insertion SubTask')<line_sep># Define a regular RL agent against this task-spec. my_policy=common.DummyPolicy(my_subtask.action_spec() my_subtask.observation_spec() 'My Policy')<line_sep># Compose the policy and subtask to form an Option module. learned_insert_option=subtask.SubTaskOption(my_subtask my_policy name='Learned Insertion')<line_sep>reach_option=common.DummyOption(env.action_spec() env.observation_spec() 'Reach for Socket')<line_sep>scripted_reset=common.DummyOption(env.action_spec() env.observation_spec() 'Scripted Reset')<line_sep>extract_option=common.DummyOption(env.action_spec() env.observation_spec() 'Extract')<line_sep>recovery_option=common.DummyOption(env.action_spec() env.observation_spec() 'Recover')<line_sep># Use some AgentFlow operators to embed the agent in a bigger agent. # First use Cond to run the learned agent if sufficiently close, otherwise reach. reach_or_insert_op=cond.Cond(cond=near_socket true_branch=learned_insert_option false_branch=reach_option name='Reach or Insert')<line_sep># Loop the insert-or-reach option 5 times. reach_and_insert_5x=loop_ops.Repeat(5 reach_or_insert_op name='Retry Loop')<line_sep>loop_body=sequence.Sequence([scripted_reset reach_and_insert_5x cond.Cond(cond=last_option_successful true_branch=extract_option false_branch=recovery_option name='post-insert')])<line_sep>main_loop=loop_ops.While(<lambda>_:<true> loop_body)<line_sep>graphviz_renderer.open_viewer(main_loop)<line_sep><return>main_loop<block_end><def_stmt>main unused_argv<block_start>build()<block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end>
<import_stmt>gzip<import_stmt>os<import_stmt>shutil<import_stmt>time<import_from_stmt>csv reader writer<import_from_stmt>typing List<import_stmt>aria2p<import_stmt>pandas<as>pd<import_from_stmt>yaml safe_load<import_from_stmt>maro.cli.data_pipeline.base DataPipeline DataTopology<import_from_stmt>maro.cli.data_pipeline.utils StaticParameter convert download_file<import_from_stmt>maro.utils.exception.cli_exception CommandError<import_from_stmt>maro.utils.logger CliLogger<line_sep>logger=CliLogger(name=__name__)<class_stmt>VmSchedulingPipeline(DataPipeline)<block_start>"""Generate vm_scheduling Pipeline data and other necessary files for the specified topology. The files will be generated in ~/.maro/data/vm_scheduling. """<line_sep>_download_file_name="AzurePublicDatasetLinksV2.txt"<line_sep>_vm_table_file_name="vmtable.csv.gz"<line_sep>_raw_vm_table_file_name="vmtable_raw.csv"<line_sep>_clean_file_name="vmtable.csv"<line_sep>_build_file_name="vmtable.bin"<line_sep>_meta_file_name="vmtable.yml"<line_sep># VM category includes three types, converting to 0, 1, 2. _category_map={'Delay-insensitive':0 'Interactive':1 'Unknown':2}<def_stmt>__init__ self topology:str source:str sample:int seed:int is_temp:bool=<false><block_start>super().__init__(scenario="vm_scheduling" topology=topology source=source is_temp=is_temp)<line_sep>self._sample=sample<line_sep>self._seed=seed<line_sep>self._download_folder=os.path.join(self._data_root self._scenario ".source" ".download")<line_sep>self._raw_folder=os.path.join(self._data_root self._scenario ".source" ".raw")<line_sep>self._download_file=os.path.join(self._download_folder self._download_file_name)<line_sep>self._vm_table_file=os.path.join(self._download_folder self._vm_table_file_name)<line_sep>self._raw_vm_table_file=os.path.join(self._raw_folder self._raw_vm_table_file_name)<line_sep>self._cpu_readings_file_name_list=[]<line_sep>self._clean_cpu_readings_file_name_list=[]<line_sep>self.aria2=aria2p.API(aria2p.Client(host="http://localhost" port=6800 secret=""))<line_sep>self._download_file_list=[]<block_end><def_stmt>download self is_force:bool=<false><block_start>self._new_folder_list.append(self._download_folder)<line_sep>os.makedirs(self._download_folder exist_ok=<true>)<line_sep>self._new_file_list.append(self._download_file)<if_stmt>(<not>is_force)<and>os.path.exists(self._download_file)<block_start>logger.info_green("File already exists, skipping download.")<block_end><else_stmt><block_start>logger.info_green(f"Downloading data from {self._source} to {self._download_file}.")<try_stmt><block_start>download_file(source=self._source destination=self._download_file)<block_end><except_stmt>Exception<as>e<block_start>logger.warning_yellow(f"Failed to download from {self._source} to {self._download_file}.")<line_sep><raise>CommandError("generate" f"Download error: {e}.")<block_end><block_end># Download text with all urls. <if_stmt>os.path.exists(self._download_file)# Download vm_table and cpu_readings <block_start>self._aria2p_download(is_force=is_force)<block_end><else_stmt><block_start>logger.warning(f"Not found downloaded source file: {self._download_file}.")<block_end><block_end><def_stmt>_aria2p_download self is_force:bool=<false><arrow>List[list]<block_start>"""Read from the text file which contains urls and use aria2p to download. Args: is_force (bool): Is force or not. """<line_sep>logger.info_green("Downloading vmtable and cpu readings.")<line_sep># Download parts of cpu reading files. 
num_files=195<line_sep># Open the txt file which contains all the required urls. <with_stmt>open(self._download_file mode="r" encoding="utf-8")<as>urls<block_start><for_stmt>remote_url urls.read().splitlines()# Get the file name. <block_start>file_name=remote_url.split('/')[-1]<line_sep># Two kinds of required files "vmtable" and "vm_cpu_readings-" start with vm. <if_stmt>file_name.startswith("vmtable")<block_start><if_stmt>(<not>is_force)<and>os.path.exists(self._vm_table_file)<block_start>logger.info_green(f"{self._vm_table_file} already exists, skipping download.")<block_end><else_stmt><block_start>logger.info_green(f"Downloading vmtable from {remote_url} to {self._vm_table_file}.")<line_sep>self.aria2.add_uris(uris=[remote_url] options={'dir':self._download_folder})<block_end><block_end><elif_stmt>file_name.startswith("vm_cpu_readings")<and>num_files<g>0<block_start>num_files<augsub>1<line_sep>cpu_readings_file=os.path.join(self._download_folder file_name)<line_sep>self._cpu_readings_file_name_list.append(file_name)<if_stmt>(<not>is_force)<and>os.path.exists(cpu_readings_file)<block_start>logger.info_green(f"{cpu_readings_file} already exists, skipping download.")<block_end><else_stmt><block_start>logger.info_green(f"Downloading cpu_readings from {remote_url} to {cpu_readings_file}.")<line_sep>self.aria2.add_uris(uris=[remote_url] options={'dir':f"{self._download_folder}"})<block_end><block_end><block_end><block_end>self._check_all_download_completed()<block_end><def_stmt>_check_all_download_completed self<block_start>"""Check all download tasks are completed and remove the ".aria2" files."""<while_stmt>1<block_start>downloads=self.aria2.get_downloads()<if_stmt>len(downloads)<eq>0<block_start>logger.info_green("Doesn't exist any pending file.")<line_sep><break><block_end><if_stmt>all([download.is_complete<for>download downloads])# Remove temp .aria2 files. <block_start>self.aria2.remove(downloads)<line_sep>logger.info_green("Download finished.")<line_sep><break><block_end><for_stmt>download downloads<block_start>logger.info_green(f"{download.name}, {download.status}, {download.progress:.2f}%")<block_end>logger.info_green("-"<times>60)<line_sep>time.sleep(10)<block_end><block_end><def_stmt>_unzip_file self original_file_name:str raw_file_name:str<block_start>original_file=os.path.join(self._download_folder original_file_name)<if_stmt>os.path.exists(original_file)<block_start>raw_file=os.path.join(self._raw_folder raw_file_name)<if_stmt>os.path.exists(raw_file)<block_start>logger.info_green(f"{raw_file} already exists, skipping unzip.")<block_end><else_stmt># Unzip gz file. <block_start><with_stmt>gzip.open(original_file mode="rb")<as>f_in<block_start>logger.info_green(f"Unzip {original_file} to {raw_file}.")<with_stmt>open(raw_file "wb")<as>f_out<block_start>shutil.copyfileobj(f_in f_out)<block_end><block_end><block_end><block_end><else_stmt><block_start>logger.warning(f"Not found downloaded source file: {original_file}.")<block_end><block_end><def_stmt>clean self<block_start>"""Unzip the csv file and process it for building binary file."""<line_sep>super().clean()<line_sep>self._new_folder_list.append(self._raw_folder)<line_sep>os.makedirs(self._raw_folder exist_ok=<true>)<line_sep>logger.info_green("Cleaning VM data.")<line_sep># Unzip vmtable. self._unzip_file(original_file_name=self._vm_table_file_name raw_file_name=self._raw_vm_table_file_name)<line_sep># Unzip cpu readings. 
<for_stmt>cpu_readings_file_name self._cpu_readings_file_name_list<block_start>raw_file_name=cpu_readings_file_name.split(".")[0]+"_raw.csv"<line_sep>self._clean_cpu_readings_file_name_list.append(cpu_readings_file_name[:-3])<line_sep>self._unzip_file(original_file_name=cpu_readings_file_name raw_file_name=raw_file_name)<block_end># Preprocess. self._preprocess()<block_end><def_stmt>_generate_id_map self old_id<block_start>num=len(old_id)<line_sep>new_id_list=[i<for>i range(1 num+1)]<line_sep>id_map=dict(zip(old_id new_id_list))<line_sep><return>id_map<block_end><def_stmt>_process_vm_table self raw_vm_table_file:str<block_start>"""Process vmtable file."""<line_sep>headers=['vmid' 'subscriptionid' 'deploymentid' 'vmcreated' 'vmdeleted' 'maxcpu' 'avgcpu' 'p95maxcpu' 'vmcategory' 'vmcorecountbucket' 'vmmemorybucket']<line_sep>required_headers=['vmid' 'subscriptionid' 'deploymentid' 'vmcreated' 'vmdeleted' 'vmcategory' 'vmcorecountbucket' 'vmmemorybucket']<line_sep>vm_table=pd.read_csv(raw_vm_table_file header=<none> index_col=<false> names=headers)<line_sep>vm_table=vm_table.loc[: required_headers]<line_sep># Convert to tick by dividing by 300 (5 minutes). vm_table['vmcreated']=pd.to_numeric(vm_table['vmcreated'] errors="coerce" downcast="integer")<floordiv>300<line_sep>vm_table['vmdeleted']=pd.to_numeric(vm_table['vmdeleted'] errors="coerce" downcast="integer")<floordiv>300<line_sep># The lifetime of the VM is deleted time - created time + 1 (tick). vm_table['lifetime']=vm_table['vmdeleted']-vm_table['vmcreated']+1<line_sep>vm_table['vmcategory']=vm_table['vmcategory'].map(self._category_map)<line_sep># Transform vmcorecount '>24' bucket to 32 and vmmemory '>64' to 128. vm_table=vm_table.replace({'vmcorecountbucket':'>24'} 32)<line_sep>vm_table=vm_table.replace({'vmmemorybucket':'>64'} 128)<line_sep>vm_table['vmcorecountbucket']=pd.to_numeric(vm_table['vmcorecountbucket'] errors="coerce" downcast="integer")<line_sep>vm_table['vmmemorybucket']=pd.to_numeric(vm_table['vmmemorybucket'] errors="coerce" downcast="integer")<line_sep>vm_table.dropna(inplace=<true>)<line_sep>vm_table=vm_table.sort_values(by='vmcreated' ascending=<true>)<line_sep># Generate ID map. vm_id_map=self._generate_id_map(vm_table['vmid'].unique())<line_sep>sub_id_map=self._generate_id_map(vm_table['subscriptionid'].unique())<line_sep>deployment_id_map=self._generate_id_map(vm_table['deploymentid'].unique())<line_sep>id_maps=(vm_id_map sub_id_map deployment_id_map)<line_sep># Mapping IDs. vm_table['vmid']=vm_table['vmid'].map(vm_id_map)<line_sep>vm_table['subscriptionid']=vm_table['subscriptionid'].map(sub_id_map)<line_sep>vm_table['deploymentid']=vm_table['deploymentid'].map(deployment_id_map)<line_sep># Sampling the VM table. # 2695548 is the total number of vms in the original Azure public dataset. 
<if_stmt>self._sample<l>2695548<block_start>vm_table=vm_table.sample(n=self._sample random_state=self._seed)<line_sep>vm_table=vm_table.sort_values(by='vmcreated' ascending=<true>)<block_end><return>id_maps vm_table<block_end><def_stmt>_convert_cpu_readings_id self old_data_path:str new_data_path:str vm_id_map:dict<block_start>"""Convert vmid in each cpu readings file."""<with_stmt>open(old_data_path 'r')<as>f_in<block_start>csv_reader=reader(f_in)<with_stmt>open(new_data_path 'w')<as>f_out<block_start>csv_writer=writer(f_out)<line_sep>csv_writer.writerow(['timestamp' 'vmid' 'maxcpu'])<for_stmt>row csv_reader# [timestamp, vmid, mincpu, maxcpu, avgcpu] <block_start><if_stmt>row[1]<in>vm_id_map<block_start>new_row=[int(row[0])<floordiv>300 vm_id_map[row[1]] row[3]]<line_sep>csv_writer.writerow(new_row)<block_end><block_end><block_end><block_end><block_end><def_stmt>_write_id_map_to_csv self id_maps<block_start>file_name=['vm_id_map' 'sub_id_map' 'deployment_id_map']<for_stmt>index range(len(id_maps))<block_start>id_map=id_maps[index]<with_stmt>open(os.path.join(self._raw_folder file_name[index])+'.csv' 'w')<as>f<block_start>csv_writer=writer(f)<line_sep>csv_writer.writerow(['original_id' 'new_id'])<for_stmt>key,value id_map.items()<block_start>csv_writer.writerow([key value])<block_end><block_end><block_end><block_end><def_stmt>_filter_out_vmid self vm_table:pd.DataFrame vm_id_map:dict<arrow>dict<block_start>new_id_map={}<for_stmt>key,value vm_id_map.items()<block_start><if_stmt>value<in>vm_table.vmid.values<block_start>new_id_map[key]=value<block_end><block_end><return>new_id_map<block_end><def_stmt>_preprocess self<block_start>logger.info_green("Process vmtable data.")<line_sep># Process vmtable file. id_maps,vm_table=self._process_vm_table(raw_vm_table_file=self._raw_vm_table_file)<line_sep>filtered_vm_id_map=self._filter_out_vmid(vm_table=vm_table vm_id_map=id_maps[0])<with_stmt>open(self._clean_file mode="w" encoding="utf-8" newline="")<as>f<block_start>vm_table.to_csv(f index=<false> header=<true>)<block_end>logger.info_green("Writing id maps file.")<line_sep>self._write_id_map_to_csv(id_maps=id_maps)<line_sep>logger.info_green("Reading cpu data.")<line_sep># Process every cpu readings file. <for_stmt>clean_cpu_readings_file_name self._clean_cpu_readings_file_name_list<block_start>raw_cpu_readings_file_name=clean_cpu_readings_file_name.split(".")[0]+"_raw.csv"<line_sep>raw_cpu_readings_file=os.path.join(self._raw_folder raw_cpu_readings_file_name)<line_sep>clean_cpu_readings_file=os.path.join(self._clean_folder clean_cpu_readings_file_name)<line_sep># Convert vmid. 
logger.info_green(f"Process {clean_cpu_readings_file}.")<line_sep>self._convert_cpu_readings_id(old_data_path=raw_cpu_readings_file new_data_path=clean_cpu_readings_file vm_id_map=filtered_vm_id_map)<block_end><block_end><def_stmt>build self<block_start>super().build()<for_stmt>clean_cpu_readings_file_name self._clean_cpu_readings_file_name_list<block_start>clean_cpu_readings_file=os.path.join(self._clean_folder clean_cpu_readings_file_name)<if_stmt>os.path.exists(clean_cpu_readings_file)<block_start>build_file_name=clean_cpu_readings_file_name.split(".")[0]+".bin"<line_sep>build_file=os.path.join(self._build_folder build_file_name)<line_sep>logger.info_green(f"Building binary data from {clean_cpu_readings_file} to {build_file}.")<line_sep>cpu_meta_file=os.path.join(self._meta_folder "cpu_readings.yml")<line_sep>convert(meta=cpu_meta_file file=[clean_cpu_readings_file] output=build_file)<block_end><else_stmt><block_start>logger.warning_yellow(f"Not found cleaned data: {self._clean_file}.")<block_end><block_end><block_end><block_end><class_stmt>VmSchedulingTopology(DataTopology)<block_start><def_stmt>__init__ self topology:str source:str sample:int seed:int is_temp=<false><block_start>super().__init__()<line_sep>self._data_pipeline["vm_data"]=VmSchedulingPipeline(topology=topology source=source sample=sample seed=seed is_temp=is_temp)<block_end><block_end><class_stmt>VmSchedulingProcess<block_start>"""Contains all predefined data topologies of vm_scheduling scenario."""<line_sep>meta_file_name="source_urls.yml"<line_sep>meta_root=os.path.join(StaticParameter.data_root "vm_scheduling/meta")<def_stmt>__init__ self is_temp:bool=<false><block_start>self.topologies={}<line_sep>self.meta_root=os.path.expanduser(self.meta_root)<line_sep>self._meta_path=os.path.join(self.meta_root self.meta_file_name)<with_stmt>open(self._meta_path)<as>fp<block_start>self._conf=safe_load(fp)<for_stmt>topology self._conf["vm_data"].keys()<block_start>self.topologies[topology]=VmSchedulingTopology(topology=topology source=self._conf["vm_data"][topology]["remote_url"] sample=self._conf["vm_data"][topology]["sample"] seed=self._conf["vm_data"][topology]["seed"] is_temp=is_temp)<block_end><block_end><block_end><block_end>
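# Minimal usage sketch for the pipeline above; it is not part of the original file, the import path is inferred from the package layout, the topology name and source URL below are placeholders rather than real values, and download() additionally needs a local aria2 RPC daemon as the code shows. <import_from_stmt>maro.cli.data_pipeline.vm_scheduling VmSchedulingPipeline<line_sep>pipeline=VmSchedulingPipeline(topology="example_topology" source="https://example.com/AzurePublicDatasetLinksV2.txt" sample=10000 seed=42)<line_sep>pipeline.download()<line_sep>pipeline.clean()<line_sep>pipeline.build()<line_sep>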
<import_stmt>grpc<import_stmt>simple_pb2<import_stmt>simple_pb2_grpc<def_stmt>run <block_start><with_stmt>grpc.insecure_channel('localhost:50051')<as>channel<block_start>stub=simple_pb2_grpc.InferenceStub(channel)<def_stmt>requests <block_start>messages=[simple_pb2.Input(batch_id=i)<for>i range(10)]<for_stmt>msg messages<block_start>print("Sending Stream batch_id={}".format(msg.batch_id))<line_sep><yield>msg<block_end><block_end>responses=stub.BatchedCompute(requests())<for_stmt>resp responses<block_start>print("Received msg on stream with batch_id={}".format(resp.batch_id))<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>run()<block_end>
<import_from_stmt>datetime datetime<import_from_stmt>urllib.parse urlparse<class_stmt>Throttle<block_start><def_stmt>__init__ self delay<block_start>self.domains={}# could be kept in a database instead self.delay=delay<block_end># delay between two consecutive downloads <def_stmt>wait_url self url_str# sleep on a per-netloc basis <block_start>domain_url=urlparse(url_str).netloc<line_sep>last_accessed=self.domains.get(domain_url)# look up the value by dictionary key <if_stmt>self.delay<g>0<and>last_accessed<is><not><none># compute the time elapsed since the last access and subtract it from the configured delay; if the result is greater than 0 the interval has not passed yet, so keep sleeping, otherwise the next page can be downloaded right away <block_start>sleep_interval=self.delay-(datetime.now()-last_accessed).seconds<if_stmt>sleep_interval<g>0<block_start><import_stmt>time<line_sep>time.sleep(sleep_interval)<block_end><block_end>self.domains[domain_url]=datetime.now()<block_end><block_end>
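# Hedged usage sketch (added for illustration, not part of the original file; the URLs and the 3-second delay are assumptions): the first request to a netloc only records the access time, the second one sleeps until the delay has elapsed. 
<if_stmt>__name__<eq>'__main__'<block_start>throttle=Throttle(delay=3)<line_sep>throttle.wait_url('http://example.com/page1')<line_sep>throttle.wait_url('http://example.com/page2')<block_end>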
<import_from_stmt>.utils geocode_table<line_sep>
<import_stmt>os sys<line_sep>root_path=os.path.realpath(__file__).split('/evaluate/multipose_keypoint_val.py')[0]<line_sep>os.chdir(root_path)<line_sep>sys.path.append(root_path)<import_from_stmt>training.batch_processor batch_processor<import_from_stmt>network.posenet poseNet<import_from_stmt>datasets.coco get_loader<import_from_stmt>evaluate.tester Tester<line_sep># Hyper-params coco_root='/data/COCO/'<line_sep>backbone='resnet101'# 'resnet50' data_dir=coco_root+'images/'<line_sep>mask_dir=coco_root<line_sep>json_path=coco_root+'COCO.json'<line_sep>inp_size=480# input size 480*480 feat_stride=4<line_sep># Set Training parameters params=Tester.TestParams()<line_sep>params.subnet_name='keypoint_subnet'<line_sep>params.gpus=[0]<line_sep>params.ckpt='./demo/models/ckpt_baseline_resnet101.h5'<line_sep>params.batch_size=6<times>len(params.gpus)<line_sep>params.print_freq=50<line_sep># validation data valid_data=get_loader(json_path data_dir mask_dir inp_size feat_stride preprocess='resnet' batch_size=params.batch_size-2<times>len(params.gpus) training=<false> shuffle=<false> num_workers=4 subnet=params.subnet_name)<line_sep>print('val dataset len: {}'.format(len(valid_data.dataset)))<line_sep># model <if_stmt>backbone<eq>'resnet101'<block_start>model=poseNet(101)<block_end><elif_stmt>backbone<eq>'resnet50'<block_start>model=poseNet(50)<block_end><for_stmt>name,module model.named_children()<block_start><for_stmt>para module.parameters()<block_start>para.requires_grad=<false><block_end><block_end>tester=Tester(model params batch_processor valid_data)<line_sep>tester.val()<line_sep>
<import_from_stmt>base *<import_stmt>requests<line_sep>ASSERT_RESPONSE=b"Hello world!"<line_sep>RESPONSE=[b"Hello " b"world!"]<class_stmt>App(BaseApp)<block_start>environ=<none><def_stmt>__call__ self environ start_response<block_start>status='200 OK'<line_sep>response_headers=[('Content-type' 'text/plain')]<line_sep>start_response(status response_headers)<line_sep>self.environ=environ.copy()<line_sep>print(environ)<line_sep><return>RESPONSE<block_end><block_end><def_stmt>test_simple <block_start><def_stmt>_call <block_start><assert_stmt>(<true>)<line_sep>server.shutdown()<block_end>server.listen(("0.0.0.0" 8000))<line_sep>server.schedule_call(0 _call)<line_sep>server.run(App())<block_end><def_stmt>test_args <block_start><def_stmt>_call arg<block_start><assert_stmt>(arg<eq>1)<line_sep>server.shutdown()<block_end>server.listen(("0.0.0.0" 8000))<line_sep>server.schedule_call(0 _call 1)<line_sep>server.run(App())<block_end><def_stmt>test_args2 <block_start><def_stmt>_call a b<block_start><assert_stmt>(a<eq>1)<assert_stmt>(b<eq>"ABC")<line_sep>server.shutdown()<block_end>server.listen(("0.0.0.0" 8000))<line_sep>server.schedule_call(0 _call 1 "ABC")<line_sep>server.run(App())<block_end><def_stmt>test_kwargs <block_start><def_stmt>_call a=0 b="test"<block_start><assert_stmt>(a<eq>1)<assert_stmt>(b<eq>"ABC")<line_sep>server.shutdown()<block_end>server.listen(("0.0.0.0" 8000))<line_sep>server.schedule_call(0 _call b="ABC" a=1)<line_sep>server.run(App())<block_end><def_stmt>test_kwargs2 <block_start><def_stmt>_call a b="test" c=<false><block_start><assert_stmt>(a<eq>1)<assert_stmt>(b<eq>"ABC")<assert_stmt>(c<eq><true>)<line_sep>server.shutdown()<block_end>server.listen(("0.0.0.0" 8000))<line_sep>server.schedule_call(0 _call 1 c=<true> b="ABC")<line_sep>server.run(App())<block_end><def_stmt>test_kwargs3 <block_start><def_stmt>_call a b="test" c=<false><block_start><assert_stmt>(a<eq>1)<assert_stmt>(b<eq>"test")<assert_stmt>(c<eq><false>)<line_sep>server.shutdown()<block_end>server.listen(("0.0.0.0" 8000))<line_sep>server.schedule_call(0 _call 1)<line_sep>server.run(App())<block_end><def_stmt>test_time <block_start><def_stmt>_call a b<block_start><assert_stmt>(a<eq>1)<assert_stmt>(b<eq>"ABC")<line_sep>server.shutdown()<block_end>server.listen(("0.0.0.0" 8000))<line_sep>server.schedule_call(5 _call 1 "ABC")<line_sep>server.run(App())<block_end><def_stmt>test_nested <block_start><def_stmt>_schedule_call <block_start>server.shutdown()<block_end><def_stmt>_call <block_start>server.schedule_call(0 _schedule_call)<block_end>server.listen(("0.0.0.0" 8000))<line_sep>server.schedule_call(0 _call)<line_sep>server.run(App())<block_end><def_stmt>test_many <block_start>l=[i<for>i range(10)]<def_stmt>_schedule_call <block_start><assert_stmt>(len(l)<eq>0)<line_sep>server.shutdown()<block_end><def_stmt>_call i<block_start>l.pop()<block_end>server.listen(("0.0.0.0" 8000))<for_stmt>i l<block_start>server.schedule_call(0 _call i)<block_end>server.schedule_call(1 _schedule_call)<line_sep>server.run(App())<block_end>
# encoding: utf-8 """ @author: loveletter @contact: <EMAIL> """<import_stmt>torchvision.transforms<as>T<import_from_stmt>.transforms RandomErasing<def_stmt>build_transforms cfg is_train=<true><block_start>normalize_transform=T.Normalize(mean=cfg.INPUT.PIXEL_MEAN std=cfg.INPUT.PIXEL_STD)<if_stmt>is_train<block_start>transform_=T.Compose([T.Resize(cfg.INPUT.SIZE_TRAIN) T.RandomRotation(cfg.INPUT.RO_DEGREE) T.ColorJitter(brightness=cfg.INPUT.BRIGHT_PROB saturation=cfg.INPUT.SATURA_PROB contrast=cfg.INPUT.CONTRAST_PROB hue=cfg.INPUT.HUE_PROB) RandomErasing(probability=cfg.INPUT.RE_PROB mean=cfg.INPUT.PIXEL_MEAN) T.Pad(cfg.INPUT.PADDING) T.RandomCrop(cfg.INPUT.SIZE_TRAIN) T.ToTensor() normalize_transform])<line_sep>transform_body=T.Compose([T.Resize(cfg.PART.SIZE_BODY) T.RandomRotation(cfg.INPUT.RO_DEGREE) T.ColorJitter(brightness=cfg.INPUT.BRIGHT_PROB saturation=cfg.INPUT.SATURA_PROB contrast=cfg.INPUT.CONTRAST_PROB hue=cfg.INPUT.HUE_PROB) RandomErasing(probability=cfg.INPUT.RE_PROB mean=cfg.INPUT.PIXEL_MEAN) T.Pad(cfg.INPUT.PADDING) T.RandomCrop(cfg.PART.SIZE_BODY) T.ToTensor() normalize_transform])<line_sep>transform_paw=T.Compose([T.Resize(cfg.PART.SIZE_PAW) T.RandomRotation(cfg.INPUT.RO_DEGREE) T.ColorJitter(brightness=cfg.INPUT.BRIGHT_PROB saturation=cfg.INPUT.SATURA_PROB contrast=cfg.INPUT.CONTRAST_PROB hue=cfg.INPUT.HUE_PROB) RandomErasing(probability=cfg.INPUT.RE_PROB mean=cfg.INPUT.PIXEL_MEAN) T.Pad(cfg.INPUT.PADDING) T.RandomCrop(cfg.PART.SIZE_PAW) T.ToTensor() normalize_transform])<line_sep><return>transform_ transform_body transform_paw<block_end><else_stmt><block_start>transform=T.Compose([T.Resize(cfg.INPUT.SIZE_TEST) T.ToTensor() normalize_transform])<line_sep><return>transform<block_end><block_end>
''' Created on 2016/5/27 :author: hubo '''<import_from_stmt>vlcp.server.module Module ModuleLoadStateChanged<import_from_stmt>vlcp.config.config defaultconfig<import_stmt>pkgutil<line_sep>@defaultconfig<class_stmt>AutoLoad(Module)<block_start>''' Auto load some modules from a package. Usually used to load network plugins. '''<line_sep>autosuccess=<false><line_sep># Auto load packages from some packages _default_autoloadpackages=('vlcp.service.sdn.plugins' )<def_stmt>__init__ self server<block_start>Module.__init__(self server)<block_end><async_keyword><def_stmt>load self container<block_start><await>Module.load(self container)<line_sep>loadmodules=[]<for_stmt>p self.autoloadpackages<block_start><try_stmt><block_start><def_stmt>onerror name<block_start>self._logger.warning("Autoload package %r on package %r failed" name p)<block_end>pkg=__import__(p fromlist=('dummy' ))<for_stmt>_,name,ispkg pkgutil.walk_packages(pkg.__path__ p+'.' onerror)<block_start><if_stmt><not>ispkg<block_start><try_stmt><block_start>pymod=__import__(name fromlist=('dummy' ))<for_stmt>m vars(pymod).values()<block_start><if_stmt>isinstance(m type)<and>issubclass(m Module)<and>getattr(m '__module__' '')<eq>name<block_start>loadmodules.append(m)<block_end><block_end><block_end><except_stmt>Exception<block_start>self._logger.warning('Autoload module %r failed' name exc_info=<true>)<block_end><block_end><block_end><block_end><except_stmt>Exception<block_start>self._logger.warning('Autoload package %r failed' p exc_info=<true>)<block_end><block_end><if_stmt>loadmodules<block_start><await>container.execute_all([self.server.moduleloader.loadmodule(m)<for>m loadmodules])<block_end><await>self.changestate(ModuleLoadStateChanged.SUCCEEDED container)<block_end><async_keyword><def_stmt>unload self container force=<false><block_start><await>Module.unload(self container force=force)<block_end><block_end>
"""Wiener process."""<import_from_stmt>stochastic.processes.continuous.brownian_motion BrownianMotion<class_stmt>WienerProcess(BrownianMotion)<block_start>"""Wiener process, or standard Brownian motion. .. image:: _static/wiener_process.png :scale: 50% :param float t: the right hand endpoint of the time interval :math:`[0,t]` for the process :param numpy.random.Generator rng: a custom random number generator """<def_stmt>__init__ self t=1 rng=<none><block_start>super().__init__(drift=0 scale=1 t=t rng=rng)<block_end><def_stmt>__str__ self<block_start><return>"Wiener process on [0, {t}]".format(t=str(self.t))<block_end><def_stmt>__repr__ self<block_start><return>"WienerProcess(t={t})".format(t=str(self.t))<block_end><block_end>
<import_from_stmt>tensorflow.python.framework dtypes<import_stmt>sys<import_from_stmt>nao.run graph_summary<def_stmt>eprint *args **kwargs<block_start>print(*args file=sys.stderr **kwargs)<block_end><def_stmt>LogWithStep summary_protobuf global_step<arrow>dtypes.int64<block_start>graph_summary.get_summary_writer().add_summary(summary_protobuf global_step=global_step)<line_sep><return>0<block_end><def_stmt>Log summary_protobuf<arrow>dtypes.int64<block_start>eprint("Writing summary!")<line_sep>graph_summary.get_summary_writer().add_summary(summary_protobuf)<line_sep><return>0<block_end><def_stmt>Debug x<arrow>dtypes.int64<block_start>eprint("Debug %s"%x)<line_sep><return>0<block_end>
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=line-too-long <def_stmt>cf_acrtransfer cli_ctx *_<block_start><import_from_stmt>azure.cli.core.commands.client_factory get_mgmt_service_client<import_from_stmt>azext_acrtransfer.vendored_sdks.containerregistry.v2019_12_01_preview._container_registry_management_client ContainerRegistryManagementClient<line_sep><return>get_mgmt_service_client(cli_ctx ContainerRegistryManagementClient)<block_end>
<import_from_stmt>django.contrib admin<import_from_stmt>django.contrib.admin ModelAdmin<import_from_stmt>river.tests.models BasicTestModel<class_stmt>BasicTestModelAdmin(ModelAdmin)<block_start><pass><block_end>admin.site.register(BasicTestModel BasicTestModelAdmin)<line_sep>
"""This module provides utility functions for pyro"""<line_sep>__all__=['runparams' 'profile' 'plot_tools']<line_sep>
# ================================================================================================== # Copyright 2011 Twitter, Inc. # -------------------------------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this work except in compliance with the License. # You may obtain a copy of the License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ================================================================================================== <import_stmt>pytest<import_from_stmt>twitter.common.rpc.finagle.trace SpanId<def_stmt>test_span_from_value # hex regex works <block_start><with_stmt>pytest.raises(SpanId.InvalidSpanId)<block_start>SpanId.from_value('1234')<block_end><assert_stmt>SpanId.from_value('0000000000001234').value<eq>int('1234' 16)<assert_stmt>SpanId.from_value(1234).value<eq>1234<assert_stmt>SpanId.from_value(SpanId(1234)).value<eq>1234<assert_stmt>SpanId.from_value(<none>).value<is><none><block_end>
'''This example demonstrates the use of Convolution1D for text classification. Gets to 0.89 test accuracy after 2 epochs. 90s/epoch on Intel i5 2.4Ghz CPU. 10s/epoch on Tesla K40 GPU. '''<import_from_future_stmt> print_function<import_stmt>numpy<as>np<import_stmt>keras.callbacks<import_from_stmt>keras.preprocessing sequence<import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers Dense Dropout Activation<import_from_stmt>keras.layers Embedding<import_from_stmt>keras.layers Conv1D GlobalMaxPooling1D<import_from_stmt>keras.datasets imdb<import_from_stmt>example_correctness_test_utils TrainingHistory StopwatchManager<line_sep># set parameters: max_features=5000<line_sep>maxlen=400<line_sep>batch_size=32<line_sep>embedding_dims=50<line_sep>filters=250<line_sep>kernel_size=3<line_sep>hidden_dims=250<line_sep>epochs=2<line_sep>print('Loading data...')<line_sep>(x_train y_train),(x_test y_test)=imdb.load_data(num_words=max_features)<line_sep>train_input_truncation=1500<line_sep>test_input_truncation=200<line_sep>x_train=x_train[:train_input_truncation]<line_sep>y_train=y_train[:train_input_truncation]<line_sep>x_test=x_test[:test_input_truncation]<line_sep>y_test=y_test[:test_input_truncation]<line_sep>print(len(x_train) 'train sequences')<line_sep>print(len(x_test) 'test sequences')<line_sep>print('Pad sequences (samples x time)')<line_sep>x_train=sequence.pad_sequences(x_train maxlen=maxlen)<line_sep>x_test=sequence.pad_sequences(x_test maxlen=maxlen)<line_sep>print('x_train shape:' x_train.shape)<line_sep>print('x_test shape:' x_test.shape)<line_sep>print('Build model...')<line_sep>model=Sequential()<line_sep># we start off with an efficient embedding layer which maps # our vocab indices into embedding_dims dimensions model.add(Embedding(max_features embedding_dims input_length=maxlen))<line_sep>model.add(Dropout(0.2))<line_sep># we add a Convolution1D, which will learn filters # word group filters of size filter_length: model.add(Conv1D(filters kernel_size padding='valid' activation='relu' strides=1))<line_sep># we use max pooling: model.add(GlobalMaxPooling1D())<line_sep># We add a vanilla hidden layer: model.add(Dense(hidden_dims))<line_sep>model.add(Dropout(0.2))<line_sep>model.add(Activation('relu'))<line_sep># We project onto a single unit output layer, and squash it with a sigmoid: model.add(Dense(1))<line_sep>model.add(Activation('sigmoid'))<line_sep>history=TrainingHistory()<line_sep>sw_manager=StopwatchManager(stop_watch compile_stop_watch)<line_sep>model.compile(loss='binary_crossentropy' optimizer='adam' metrics=['accuracy'])<line_sep>model.fit(x_train y_train batch_size=batch_size epochs=epochs validation_data=(x_test y_test) callbacks=[history sw_manager])<line_sep>output.contents=np.array([history.acc history.loss history.val_acc history.val_loss])<line_sep>
<import_stmt>unittest<import_from_stmt>jmetal.core.solution Solution<import_from_stmt>jmetal.util.constraint_handling is_feasible number_of_violated_constraints overall_constraint_violation_degree feasibility_ratio<class_stmt>ConstraintHandlingTestCases(unittest.TestCase)<block_start><def_stmt>test_should_is_feasible_return_true_if_the_solution_has_no_constraints self<arrow><none><block_start>solution=Solution(number_of_variables=2 number_of_objectives=2 number_of_constraints=0)<line_sep>self.assertEqual(<true> is_feasible(solution))<block_end><def_stmt>test_should_is_feasible_return_true_if_the_solution_has_constraints_and_is_feasible self<arrow><none><block_start>solution=Solution(number_of_variables=2 number_of_objectives=2 number_of_constraints=1)<line_sep>solution.constraints[0]=0<line_sep>self.assertEqual(<true> is_feasible(solution))<block_end><def_stmt>test_should_is_feasible_return_false_if_the_solution_has_is_not_feasible self<arrow><none><block_start>solution=Solution(number_of_variables=2 number_of_objectives=2 number_of_constraints=1)<line_sep>solution.constraints[0]=-1<line_sep>self.assertEqual(<false> is_feasible(solution))<block_end><def_stmt>test_should_number_of_violated_constraints_return_zero_if_the_solution_has_no_constraints self<arrow><none><block_start>solution=Solution(number_of_variables=2 number_of_objectives=2 number_of_constraints=0)<line_sep>self.assertEqual(0 number_of_violated_constraints(solution))<block_end><def_stmt>test_should_number_of_violated_constraints_return_zero_if_the_solution_has_not_violated_constraints self<arrow><none><block_start>solution=Solution(number_of_variables=2 number_of_objectives=2 number_of_constraints=2)<line_sep>self.assertEqual(0 number_of_violated_constraints(solution))<block_end><def_stmt>test_should_number_of_violated_constraints_return_the_right_number_of_violated_constraints self<arrow><none><block_start>solution=Solution(number_of_variables=2 number_of_objectives=2 number_of_constraints=2)<line_sep>solution.constraints[0]=0<line_sep>solution.constraints[1]=-2<line_sep>self.assertEqual(1 number_of_violated_constraints(solution))<block_end><def_stmt>test_should_constraint_violation_degree_return_zero_if_the_solution_has_no_constraints self<arrow><none><block_start>solution=Solution(number_of_variables=2 number_of_objectives=2 number_of_constraints=0)<line_sep>self.assertEqual(0 overall_constraint_violation_degree(solution))<block_end><def_stmt>test_should_constraint_violation_degree_return_zero_if_the_solution_has_not_violated_constraints self<arrow><none><block_start>solution=Solution(number_of_variables=2 number_of_objectives=2 number_of_constraints=2)<line_sep>self.assertEqual(0 overall_constraint_violation_degree(solution))<block_end><def_stmt>test_should_constraint_violation_degree_return_the_right_violation_degree self<arrow><none><block_start>solution=Solution(number_of_variables=2 number_of_objectives=2 number_of_constraints=2)<line_sep>solution.constraints[0]=-1<line_sep>solution.constraints[1]=-2<line_sep>self.assertEqual(-3 overall_constraint_violation_degree(solution))<block_end><def_stmt>test_should_feasibility_ratio_raise_and_exception_if_the_solution_list_is_empty self<arrow><none><block_start><with_stmt>self.assertRaises(Exception)<block_start>feasibility_ratio([])<block_end><block_end><def_stmt>test_should_feasibility_ratio_return_zero_if_all_the_solutions_in_a_list_are_unfeasible self<arrow><none><block_start>solution1=Solution(2 2 2)<line_sep>solution2=Solution(2 2 
2)<line_sep>solution1.constraints[0]=0<line_sep>solution1.constraints[1]=-1<line_sep>solution2.constraints[0]=-2<line_sep>solution2.constraints[1]=0<line_sep>self.assertEqual(0 feasibility_ratio([solution1 solution2]))<block_end><def_stmt>test_should_feasibility_ratio_return_one_if_all_the_solutions_in_a_list_are_feasible self<arrow><none><block_start>solution1=Solution(2 2 2)<line_sep>solution2=Solution(2 2 2)<line_sep>solution1.constraints[0]=0<line_sep>solution1.constraints[1]=0<line_sep>solution2.constraints[0]=0<line_sep>solution2.constraints[1]=0<line_sep>self.assertEqual(1.0 feasibility_ratio([solution1 solution2]))<block_end><def_stmt>test_should_feasibility_ratio_return_the_right_percentage_of_feasible_solutions self<arrow><none><block_start>solution1=Solution(2 2 1)<line_sep>solution2=Solution(2 2 1)<line_sep>solution3=Solution(2 2 1)<line_sep>solution1.constraints[0]=-1<line_sep>solution2.constraints[0]=0<line_sep>solution3.constraints[0]=-2<line_sep>self.assertEqual(1/3 feasibility_ratio([solution1 solution2 solution3]))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_from_stmt>fastapi FastAPI Request<import_from_stmt>fastapi.exceptions RequestValidationError<import_from_stmt>fastapi.responses JSONResponse<import_from_stmt>starlette.responses Response<try_stmt><block_start><import_from_stmt>starlette_context.middleware RawContextMiddleware<import_from_stmt>starlette_context.plugins RequestIdPlugin<block_end><except_stmt>ImportError<block_start><pass><block_end><import_from_stmt>galaxy.exceptions MessageException<import_from_stmt>galaxy.web.framework.base walk_controller_modules<import_from_stmt>galaxy.web.framework.decorators api_error_message validation_error_to_message_exception <def_stmt>add_exception_handler app:FastAPI<arrow><none><block_start>@app.exception_handler(RequestValidationError)<async_keyword><def_stmt>validate_exception_middleware request:Request exc:RequestValidationError<arrow>Response<block_start>exc=validation_error_to_message_exception(exc)<line_sep>error_dict=api_error_message(<none> exception=exc)<line_sep><return>JSONResponse(status_code=400 content=error_dict)<block_end>@app.exception_handler(MessageException)<async_keyword><def_stmt>message_exception_middleware request:Request exc:MessageException<arrow>Response<block_start>error_dict=api_error_message(<none> exception=exc)<line_sep><return>JSONResponse(status_code=exc.status_code content=error_dict)<block_end><block_end><def_stmt>add_request_id_middleware app:FastAPI<block_start>app.add_middleware(RawContextMiddleware plugins=(RequestIdPlugin(force_new_uuid=<true>) ))<block_end><def_stmt>include_all_package_routers app:FastAPI package_name:str<block_start><for_stmt>_,module walk_controller_modules(package_name)<block_start>router=getattr(module "router" <none>)<if_stmt>router<block_start>app.include_router(router)<block_end><block_end><block_end>
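# Hedged wiring sketch (illustrative only, not part of the original module): how the three helpers above could be applied to an app instance. The build_app helper and the router package path are assumptions, and add_request_id_middleware also needs starlette-context installed, per the optional import above. 
<def_stmt>build_app <block_start>app=FastAPI()<line_sep>add_exception_handler(app)<line_sep>add_request_id_middleware(app)<line_sep>include_all_package_routers(app "galaxy.webapps.base.api")<line_sep><return>app<block_end>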
<import_stmt>torch<import_from_stmt>up.tasks.det.models.utils.assigner map_rois_to_level<import_from_stmt>up.tasks.det.models.utils.bbox_helper clip_bbox filter_by_size <def_stmt>mlvl_extract_roi_features rois x_features fpn_levels fpn_strides base_scale roi_extractor return_recover_inds=<false><block_start>x_rois,recover_inds=map_rois_to_level(fpn_levels base_scale rois)<line_sep>mlvl_rois=[]<line_sep>mlvl_features=[]<line_sep>mlvl_strides=[]<for_stmt>lvl_idx fpn_levels<block_start><if_stmt>x_rois[lvl_idx].numel()<g>0<block_start>mlvl_rois.append(x_rois[lvl_idx])<line_sep>mlvl_features.append(x_features[lvl_idx])<line_sep>mlvl_strides.append(fpn_strides[lvl_idx])<block_end><block_end><assert_stmt>len(mlvl_rois)<g>0 "No rois provided for mimic stage"<line_sep>pooled_feats=[roi_extractor(*args)<for>args zip(mlvl_rois mlvl_features mlvl_strides)]<if_stmt>return_recover_inds<block_start><return>torch.cat(pooled_feats dim=0) recover_inds<block_end><else_stmt><block_start><return>torch.cat(pooled_feats dim=0)<block_end><block_end><def_stmt>mlvl_extract_gt_masks gt_bboxes fpn_levels fpn_strides base_scale featmap_sizes<block_start>gt_tensors=[]<for_stmt>b_ix range(len(gt_bboxes))<block_start>gt,_=filter_by_size(gt_bboxes[b_ix] min_size=1)<line_sep>bdx=gt.new_ones(gt.shape[0] 1)<times>b_ix<line_sep>gt_tensors.append(torch.cat([bdx gt[: :4]] dim=1))<block_end>gt_bboxes,_=map_rois_to_level(fpn_levels base_scale torch.cat(gt_tensors dim=0))<line_sep>imit_range=[0 0 0 0 0]<with_stmt>torch.no_grad()<block_start>masks=[]<for_stmt>idx range(len(featmap_sizes))<block_start>b,_,h,w=featmap_sizes[idx]<line_sep>gt_level=gt_bboxes[idx]<line_sep>mask=gt_level.new_zeros(b h w)<for_stmt>gt gt_level<block_start>gt_level_map=gt[1:]/fpn_strides[idx]<line_sep>lx=max(int(gt_level_map[0])-imit_range[idx] 0)<line_sep>rx=min(int(gt_level_map[2])+imit_range[idx] w)<line_sep>ly=max(int(gt_level_map[1])-imit_range[idx] 0)<line_sep>ry=min(int(gt_level_map[3])+imit_range[idx] h)<if_stmt>(lx<eq>rx)<or>(ly<eq>ry)<block_start>mask[int(gt[0]) ly lx]<augadd>1<block_end><else_stmt><block_start>mask[int(gt[0]) ly:ry lx:rx]<augadd>1<block_end><block_end>mask=(mask<g>0).type_as(gt_level)<line_sep>masks.append(mask)<block_end><block_end><return>masks<block_end><def_stmt>match_gts proposals gt_bboxes ignore_regions image_info matcher<block_start>B=len(gt_bboxes)<if_stmt>ignore_regions<is><none><block_start>ignore_regions=[<none>]<times>B<block_end>labels=proposals.new_zeros(proposals.shape[0]).long()<for_stmt>b_ix range(B)<block_start>bind=torch.where(proposals[: 0]<eq>b_ix)[0]<line_sep>rois=proposals[bind]<line_sep># rois = proposals[proposals[:, 0] == b_ix] <if_stmt>rois.numel()<g>0# remove batch idx, score & label <block_start>rois=rois[: 1:1+4]<block_end><else_stmt><block_start>rois=rois.view(-1 4)<block_end># filter gt_bboxes which are too small gt,_=filter_by_size(gt_bboxes[b_ix] min_size=1)<line_sep># clip rois which are out of bounds rois=clip_bbox(rois image_info[b_ix])<line_sep>rois_target_gt,overlaps=matcher.match(rois gt ignore_regions[b_ix] return_max_overlaps=<true>)<line_sep>pos_inds=(rois_target_gt<ge>0).long()<line_sep>labels[bind]=pos_inds<block_end><return>labels<block_end>
<import_from_stmt>typing Optional<import_from_stmt>botocore.client BaseClient<import_from_stmt>typing Dict<import_from_stmt>typing Union<import_from_stmt>botocore.paginate Paginator<import_from_stmt>datetime datetime<import_from_stmt>botocore.waiter Waiter<import_from_stmt>typing List<class_stmt>Client(BaseClient)<block_start><def_stmt>can_paginate self operation_name:str=<none><block_start><pass><block_end><def_stmt>delete_scaling_policy self PolicyName:str ServiceNamespace:str ResourceId:str ScalableDimension:str<arrow>Dict<block_start><pass><block_end><def_stmt>delete_scheduled_action self ServiceNamespace:str ScheduledActionName:str ResourceId:str ScalableDimension:str<arrow>Dict<block_start><pass><block_end><def_stmt>deregister_scalable_target self ServiceNamespace:str ResourceId:str ScalableDimension:str<arrow>Dict<block_start><pass><block_end><def_stmt>describe_scalable_targets self ServiceNamespace:str ResourceIds:List=<none> ScalableDimension:str=<none> MaxResults:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_scaling_activities self ServiceNamespace:str ResourceId:str=<none> ScalableDimension:str=<none> MaxResults:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_scaling_policies self ServiceNamespace:str PolicyNames:List=<none> ResourceId:str=<none> ScalableDimension:str=<none> MaxResults:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>describe_scheduled_actions self ServiceNamespace:str ScheduledActionNames:List=<none> ResourceId:str=<none> ScalableDimension:str=<none> MaxResults:int=<none> NextToken:str=<none><arrow>Dict<block_start><pass><block_end><def_stmt>generate_presigned_url self ClientMethod:str=<none> Params:Dict=<none> ExpiresIn:int=<none> HttpMethod:str=<none><block_start><pass><block_end><def_stmt>get_paginator self operation_name:str=<none><arrow>Paginator<block_start><pass><block_end><def_stmt>get_waiter self waiter_name:str=<none><arrow>Waiter<block_start><pass><block_end><def_stmt>put_scaling_policy self PolicyName:str ServiceNamespace:str ResourceId:str ScalableDimension:str PolicyType:str=<none> StepScalingPolicyConfiguration:Dict=<none> TargetTrackingScalingPolicyConfiguration:Dict=<none><arrow>Dict<block_start><pass><block_end><def_stmt>put_scheduled_action self ServiceNamespace:str ScheduledActionName:str ResourceId:str ScalableDimension:str Schedule:str=<none> StartTime:datetime=<none> EndTime:datetime=<none> ScalableTargetAction:Dict=<none><arrow>Dict<block_start><pass><block_end><def_stmt>register_scalable_target self ServiceNamespace:str ResourceId:str ScalableDimension:str MinCapacity:int=<none> MaxCapacity:int=<none> RoleARN:str=<none><arrow>Dict<block_start><pass><block_end><block_end>
''' Function: show the effect of the trained model in the game Author: Charles WeChat official account: Charles的皮卡丘 '''<import_stmt>torch<import_stmt>config<import_from_stmt>nets.nets DQNet DQNAgent<import_from_stmt>gameAPI.game GamePacmanAgent<line_sep>'''run demo'''<def_stmt>runDemo <block_start><if_stmt>config.operator<eq>'ai'<block_start>game_pacman_agent=GamePacmanAgent(config)<line_sep>dqn_net=DQNet(config)<line_sep>dqn_net.load_state_dict(torch.load(config.weightspath))<line_sep>dqn_agent=DQNAgent(game_pacman_agent dqn_net config)<line_sep>dqn_agent.test()<block_end><elif_stmt>config.operator<eq>'person'<block_start>GamePacmanAgent(config).runGame()<block_end><else_stmt><block_start><raise>ValueError('config.operator should be <ai> or <person>...')<block_end><block_end>'''run'''<if_stmt>__name__<eq>'__main__'<block_start>runDemo()<block_end>
<import_from_stmt>LightPipes *<import_stmt>matplotlib.pyplot<as>plt<line_sep>GridSize=10<times>mm<line_sep>GridDimension=256<line_sep>lambda_=1000<times>nm#lambda_ is used because lambda is a Python built-in function. R=2.5<times>mm#Radius of the aperture xs=1<times>mm<line_sep>ys=1<times>mm#shift of the aperture Field=Begin(GridSize lambda_ GridDimension)<line_sep>Field=CircScreen(0.7<times>mm 1<times>mm 1.5<times>mm Field)<line_sep>Field=RectScreen(1<times>mm 1<times>mm -1.5<times>mm -1.5<times>mm -0.002 Field)<line_sep>Field=RectScreen(1<times>mm 3.5<times>mm -2<times>mm 2.5<times>mm 30 Field)<line_sep>Field=GaussAperture(4<times>mm 0 0 1 Field)<line_sep>I=Intensity(0 Field)<line_sep>plt.imshow(I)<line_sep>plt.axis('off')<line_sep>plt.show()<line_sep>
<def_stmt>test_get_cpu_times device<block_start>result=device.cpu_times()<assert_stmt>result<is><not><none><block_end><def_stmt>test_get_cpu_percent device<block_start>percent=device.cpu_percent(interval=1)<assert_stmt>percent<is><not><none><assert_stmt>percent<ne>0<block_end><def_stmt>test_get_cpu_count device<block_start><assert_stmt>device.cpu_count()<eq>2<block_end>
<import_stmt>numpy<as>np<import_from_stmt>numpy.polynomial polynomial<as>poly<import_stmt>scipy.signal<as>signal<import_stmt>matplotlib.pyplot<as>plt<line_sep># Component values GAIN=1.0<line_sep>R6=10e3<line_sep>Ra=100e3<times>GAIN<line_sep>R10b=2e3+100e3<times>(1-GAIN)<line_sep>R11=15e3<line_sep>R12=422e3<line_sep>C3=0.1e-6<line_sep>C5=68e-9<line_sep>C7=82e-9<line_sep>C8=390e-12<line_sep>a0s=C7<times>C8<times>R10b<times>R11<times>R12<line_sep>a1s=C7<times>R10b<times>R11+C8<times>R12<times>(R10b+R11)<line_sep>a2s=R10b+R11<line_sep>b0s=a0s<line_sep>b1s=C7<times>R11<times>R12+a1s<line_sep>b2s=R12+a2s<line_sep>w,h=signal.freqs([b0s b1s b2s] [a0s a1s a2s] worN=np.logspace(1.3 4.3 1000)<times>(2<times>np.pi))<line_sep>plt.semilogx(w/(2<times>np.pi) 20<times>np.log10(np.abs(h+np.finfo(float).eps)))<line_sep>plt.show()<line_sep># Create impedances # z1Num = R6 # poly.Polynomial((1, R6 * (C3 + C5))) # z1Den = poly.Polynomial((0, C3, R6 * C3 * C5)) # z2Num = R10b + R11 # poly.Polynomial((R10b + R11, C7 * R10b * R11)) # z2Den = 1.0 # poly.Polynomial((1, C7 * R11)) # z3Num = R12 # z3Den = 1 # poly.Polynomial((1, C8 * R12)) # # Simplify # b_s = z1Den * Ra * (z3Den * z2Num + z2Den * z3Num) # a_s = z2Den * z3Den * (Ra * z1Den + z1Num) # print(b_s.coef) # print(a_s.coef) # w, h = signal.freqs(b_s.coef, a_s.coef, worN=np.logspace(0, 2, 1000)*(2*np.pi)) # plt.semilogx(w/(2*np.pi), 20*np.log10(np.abs(h+np.finfo(float).eps))) # plt.show()
<def_stmt>gen_value_string <block_start><return>'value'<block_end>
<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>opennmt.models sequence_tagger<class_stmt>SequenceTaggerTest(tf.test.TestCase)<block_start><def_stmt>_testTagSchemeFlags self tag_fn labels predicted expected_true_positives expected_false_positives expected_false_negatives <block_start>labels=np.array([[tf.compat.as_bytes(c)<for>c labels]])<line_sep>predicted=np.array([[tf.compat.as_bytes(c)<for>c predicted]])<line_sep>gold_flags,predicted_flags=tag_fn(labels predicted)<line_sep>true_positives=tf.keras.metrics.TruePositives()<line_sep>false_positives=tf.keras.metrics.FalsePositives()<line_sep>false_negatives=tf.keras.metrics.FalseNegatives()<line_sep>true_positives.update_state(gold_flags predicted_flags)<line_sep>false_positives.update_state(gold_flags predicted_flags)<line_sep>false_negatives.update_state(gold_flags predicted_flags)<line_sep>tp=self.evaluate(true_positives.result())<line_sep>fp=self.evaluate(false_positives.result())<line_sep>fn=self.evaluate(false_negatives.result())<line_sep>self.assertEqual(expected_true_positives tp msg="true positives mismatch")<line_sep>self.assertEqual(expected_false_positives fp msg="false positives mismatch")<line_sep>self.assertEqual(expected_false_negatives fn msg="false negatives mismatch")<block_end><def_stmt>testBIOESFlags self<block_start>self._testTagSchemeFlags(sequence_tagger.flag_bioes_tags ["S-LOC"] ["S-ORG"] expected_true_positives=0 expected_false_positives=1 expected_false_negatives=1 )<line_sep>self._testTagSchemeFlags(sequence_tagger.flag_bioes_tags ["B-LOC" "I-LOC" "E-LOC"] ["B-LOC" "I-LOC" "E-LOC"] expected_true_positives=1 expected_false_positives=0 expected_false_negatives=0 )<line_sep>self._testTagSchemeFlags(sequence_tagger.flag_bioes_tags ["O" "B-LOC" "I-LOC" "E-LOC"] ["B-LOC" "I-LOC" "E-LOC" "O"] expected_true_positives=0 expected_false_positives=1 expected_false_negatives=1 )<line_sep>self._testTagSchemeFlags(sequence_tagger.flag_bioes_tags ["B-LOC" "I-LOC" "E-LOC"] ["B-LOC" "E-LOC" "S-LOC"] expected_true_positives=0 expected_false_positives=2 expected_false_negatives=1 )<line_sep>self._testTagSchemeFlags(sequence_tagger.flag_bioes_tags ["B-LOC" "I-LOC" "E-LOC"] ["S-LOC" "O" "O"] expected_true_positives=0 expected_false_positives=1 expected_false_negatives=1 )<line_sep>self._testTagSchemeFlags(sequence_tagger.flag_bioes_tags ["S-LOC" "O"] ["B-LOC" "E-LOC"] expected_true_positives=0 expected_false_positives=1 expected_false_negatives=1 )<line_sep>self._testTagSchemeFlags(sequence_tagger.flag_bioes_tags ["B-ORG" "E-ORG" "O" "B-PER" "E-PER" "O" "O" "O" "O" "B-MISC" "E-MISC" "O" ] ["B-ORG" "E-ORG" "S-PER" "S-PER" "O" "O" "O" "O" "O" "O" "O" "S-MISC" ] expected_true_positives=1 expected_false_positives=3 expected_false_negatives=2 )<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.test.main()<block_end>
<import_from_future_stmt> unicode_literals<try_stmt><block_start><import_from_stmt>django.core.urlresolvers reverse<block_end><except_stmt>ModuleNotFoundError<block_start><import_from_stmt>django.urls reverse<block_end><import_from_stmt>django.db models<import_stmt>generic_scaffold<class_stmt>Book(models.Model)<block_start>title=models.CharField(max_length=128)<line_sep>author=models.CharField(max_length=128)<line_sep>category=models.CharField(max_length=32)<def_stmt>get_absolute_url self<block_start><return>reverse(self.detail_url_name args=[self.id])<block_end><def_stmt>__str__ self<block_start><return>'{0} {1} {2}'.format(self.title self.author self.category)<block_end><block_end>
<import_from_future_stmt> absolute_import<import_from_stmt>.sequenceCrossEntropyLoss SequenceCrossEntropyLoss<line_sep>__all__=['SequenceCrossEntropyLoss' ]<line_sep>
<import_from_stmt>sage.misc.lazy_import lazy_import<line_sep>lazy_import('sage.crypto.sbox' ['SBox' 'feistel_construction' 'misty_construction'] deprecation=22986)<line_sep>
<import_from_stmt>typing Optional<import_stmt>torch<import_from_stmt>.mmd GaussianKernel<import_from_stmt>.detector Detector<import_stmt>torchdrift<import_stmt>warnings<def_stmt>partial_kernel_mmd_twostage x y n_perm=<none> kernel=GaussianKernel() fraction_to_match=1.0 wasserstein_p=2.0<block_start>"""Partial kernel MMD using a Wasserstein coupling to obtain the weight for the reference. """<line_sep>torchdrift.utils.check(n_perm<is><none> "Bootstrapping within partial MMD is not implemented, use bootstrap during fit" error_class=NotImplementedError)<line_sep>n,d=x.shape<line_sep>m,d2=y.shape<if_stmt>fraction_to_match<l>1.0<block_start>_,coupling=torchdrift.detectors.wasserstein(x y fraction_to_match=fraction_to_match return_coupling=<true> n_perm=<none> p=wasserstein_p)<line_sep>w=coupling[: :-1].sum(1).to(device=x.device dtype=x.dtype)/fraction_to_match<block_end><else_stmt><block_start>w=torch.full((n ) 1.0/n device=x.device dtype=x.dtype)<block_end>torchdrift.utils.check(d<eq>d2 "feature dimension mismatch")<line_sep>xy=torch.cat([x.detach() y.detach()] dim=0)<line_sep>dists=torch.cdist(xy xy p=2.0)<line_sep># we are a bit sloppy here as we just keep the diagonal and everything twice k=kernel(dists)<line_sep>k_x=k[:n :n]<line_sep>k_y=k[n: n:]<line_sep>k_xy=k[:n n:]<line_sep># The diagonals are always 1 (up to numerical error, this is (3) in Gretton et al.) mmd=(w@k_x)@w+k_y.sum()/(m<times>m)-2<times>(w@k_xy).sum()/m<line_sep><return>mmd<block_end><def_stmt>partial_kernel_mmd_qp x y n_perm=<none> kernel=GaussianKernel() fraction_to_match=1.0<block_start>"""Partial Kernel MMD using quadratic programming. This is very slow and mainly intended for reference purposes. You need to install qpsolvers to use this function."""<line_sep>torchdrift.utils.check(n_perm<is><none> "Bootstrapping within partial MMD is not implemented, use bootstrap during fit" error_class=NotImplementedError)<import_stmt>qpsolvers<line_sep>n,d=x.shape<line_sep>m,d2=y.shape<line_sep>torchdrift.utils.check(d<eq>d2 "feature dimension mismatch")<line_sep>xy=torch.cat([x.detach() y.detach()] dim=0)<line_sep>dists=torch.cdist(xy xy p=2.0)<line_sep># we are a bit sloppy here as we just keep the diagonal and everything twice k=kernel(dists.double())<line_sep>k_x=k[:n :n]<line_sep>k_y=k[n: n:]<line_sep>k_xy=k[:n n:]<line_sep>v=torch.full((m ) 1/m dtype=k_y.dtype device=k_y.device)<line_sep>R=torch.cholesky(k_x upper=<true>)<line_sep>d=torch.inverse(R.t())@(k_xy.sum(1)/m)<line_sep>lb=torch.zeros((n ) dtype=k_x.dtype device=k_x.device)<line_sep>ub=torch.full((n ) 1.0/(n<times>fraction_to_match) dtype=k_x.dtype device=k_x.device)<line_sep>w=qpsolvers.solve_ls(R.cpu().numpy() d.cpu().numpy() lb=lb.cpu().numpy() ub=ub.cpu().numpy() A=torch.ones((1 n ) dtype=R.dtype).numpy() b=torch.ones((1 ) dtype=R.dtype).numpy())<line_sep>torchdrift.utils.check(w<is><not><none> 'QP failed to find a solution (numerical accuracy with the bounds?)')<line_sep>w=torch.as_tensor(w device=k_x.device dtype=k_x.dtype)<line_sep>mmd=(w@k_x)@w+k_y.sum()/(m<times>m)-2<times>(w@k_xy).sum()/m<line_sep><return>mmd<block_end><def_stmt>partial_kernel_mmd_approx x y fraction_to_match=1.0 kernel=GaussianKernel() n_perm=<none><block_start>torchdrift.utils.check(n_perm<is><none> "Bootstrapping within partial MMD is not implemented, use bootstrap during fit" error_class=NotImplementedError)<line_sep>rng=torch.Generator(device=x.device).manual_seed(1234)<line_sep>n,d=x.shape<line_sep>m,d2=y.shape<line_sep>torchdrift.utils.check(d<eq>d2 "feature dimension 
mismatch")<line_sep>xy=torch.cat([x.detach() y.detach()] dim=0)<line_sep>dists=torch.cdist(xy xy p=2.0)<line_sep>k=kernel(dists.double())<line_sep>k_x=k[:n :n]<line_sep>k_y=k[n: n:]<line_sep>k_xy=k[:n n:]<line_sep>w=torch.full((n ) 1.0/n dtype=k_x.dtype device=k_x.device requires_grad=<false>)<line_sep>mmd=(w@k_x)@w+k_y.sum()/(m<times>m)-2<times>(w@k_xy).sum()/m<for_stmt>i range(100)<block_start>r=torch.rand(() device=x.device dtype=x.dtype generator=rng)+0.5<line_sep>grad_mmd=(k_x@w)-(k_xy).mean(1)<line_sep>grad_mmd_min=grad_mmd[(w<l>1.0/(n<times>fraction_to_match))]<if_stmt>grad_mmd_min.size(0)<g>0<block_start>grad_mmd_min=grad_mmd_min.min()<block_end><else_stmt><block_start>grad_mmd_min=torch.zeros_like(r)<block_end>grad_mmd_max=grad_mmd[(w<g>0)]<if_stmt>grad_mmd_max.size(0)<g>0<block_start>grad_mmd_max=grad_mmd_max.max()<block_end><else_stmt># pragma: no cover <block_start>grad_mmd_max=torch.zeros_like(r)<block_end>active_mask=(((w<g>0)|(grad_mmd<l>grad_mmd_min<times>r))&((w<l>1.0/(n<times>fraction_to_match))|(grad_mmd<g>grad_mmd_max<times>r)))<line_sep>H_mmd_active=k_x[active_mask][: active_mask]<if_stmt>H_mmd_active.size(0)<eq>0<block_start><continue><block_end>u=torch.cholesky(H_mmd_active)<line_sep>Hinvgr=torch.cholesky_solve(grad_mmd[active_mask][: <none>] u).squeeze(1)<line_sep>w_active=w[active_mask]<line_sep>Hinvgr<augsub>Hinvgr.mean()<line_sep>Hinvgr_full=torch.zeros_like(w)<line_sep>Hinvgr_full[active_mask]=Hinvgr<line_sep>step=1.0<for_stmt>j range(5)<block_start>w_cand=w.clone()<line_sep>w_cand<augsub>step<times>Hinvgr_full<line_sep>w_cand.clamp_(min=0 max=1.0/(n<times>fraction_to_match))<line_sep>w_cand<augdiv>w_cand.sum()<line_sep>mmd_cand=(w_cand@k_x)@w_cand+k_y.sum()/(m<times>m)-2<times>(w_cand@k_xy).sum()/m<line_sep>is_lower=(mmd_cand<l>mmd)<line_sep>mmd=torch.where(is_lower mmd_cand mmd)<line_sep>w=torch.where(is_lower w_cand w)<line_sep>step<augdiv>5<block_end>grad_mmd=2<times>(k_x@w)-2<times>(k_xy).mean(1)<line_sep>grad_mmd_min=grad_mmd[(w<l>1.0/(n<times>fraction_to_match))]<if_stmt>grad_mmd_min.size(0)<g>0<block_start>grad_mmd_min=grad_mmd_min.min()<block_end><else_stmt><block_start>grad_mmd_min=torch.zeros_like(r)<block_end>grad_mmd_max=grad_mmd[(w<g>0)]<if_stmt>grad_mmd_max.size(0)<g>0<block_start>grad_mmd_max=grad_mmd_max.max()<block_end><else_stmt># pragma: no cover <block_start>grad_mmd_max=torch.zeros_like(r)<block_end>active_mask=(((w<g>0)|(grad_mmd<l>grad_mmd_min<times>r))&((w<l>1.0/(n<times>fraction_to_match))|(grad_mmd<g>grad_mmd_max<times>r)))<line_sep>step=1e-1<for_stmt>j range(5)<block_start>w_candnd=w.clone()<line_sep>grad_mmd_x=grad_mmd.clone()<line_sep>grad_mmd_x=torch.where(active_mask grad_mmd_x torch.zeros(() device=grad_mmd_x.device dtype=grad_mmd_x.dtype))<line_sep>grad_mmd_x=torch.where(active_mask grad_mmd_x grad_mmd_x-grad_mmd_x.mean())<line_sep>w_cand<augsub>step<times>grad_mmd_x<line_sep>w_cand.clamp_(min=0 max=1.0/(n<times>fraction_to_match))<line_sep>w_cand<augdiv>w_cand.sum()<line_sep>mmd_cand=(w_cand@k_x)@w_cand+k_y.sum()/(m<times>m)-2<times>(w_cand@k_xy).sum()/m<line_sep>is_lower=(mmd_cand<l>mmd)<line_sep>mmd=torch.where(is_lower mmd_cand mmd)<line_sep>w=torch.where(is_lower w_cand w)<line_sep>step=step/5<block_end><block_end><return>mmd<block_end><class_stmt>PartialKernelMMDDriftDetector(Detector)<block_start>"""Drift detector based on the partial MMD Distance. 
(see <NAME>: Partial Wasserstein and Maximum Mean Discrepancy distances for bridging the gap between outlier detection and drift detection, https://arxiv.org/abs/2106.01289 ) Note: We recommend using dtype double as input for now. Args: fraction_to_match: fraction of x probability mass to be matched n_perm: number of bootstrap permutations to run to compute p-value (None for not returning a p-value) method: PartialKernelMMDDriftDetector.METHOD_TWOSTAGE, METHOD_APPROX, or METHOD_QP """<line_sep>METHOD_TWOSTAGE=1<line_sep>METHOD_APPROX=2<line_sep>METHOD_QP=3<def_stmt>__init__ self * return_p_value=<false> n_perm=1000 fraction_to_match=1.0 kernel=GaussianKernel() method=METHOD_TWOSTAGE <block_start>super().__init__(return_p_value=return_p_value)<line_sep>self.fraction_to_match=fraction_to_match<line_sep>self.kernel=kernel<line_sep>self.n_perm=n_perm<line_sep>self.n_test=<none><line_sep>self.scores=<none><if_stmt>method<eq>PartialKernelMMDDriftDetector.METHOD_TWOSTAGE<block_start>self.partial_mmd=partial_kernel_mmd_twostage<block_end><elif_stmt>method<eq>PartialKernelMMDDriftDetector.METHOD_APPROX<block_start>self.partial_mmd=partial_kernel_mmd_approx<block_end><elif_stmt>method<eq>PartialKernelMMDDriftDetector.METHOD_QP<block_start>self.partial_mmd=partial_kernel_mmd_qp<block_end><else_stmt># pragma: no cover <block_start><raise>RuntimeError("Invalid Partial MMD method")<block_end><block_end><def_stmt>fit self x:torch.Tensor n_test:Optional[int]=<none><block_start>"""Record a sample as the reference distribution Args: x: The reference data n_test: If an int is specified, the last n_test datapoints will not be considered part of the reference data. Instead, bootstrappin using permutations will be used to determine the distribution under the null hypothesis at fit time. Future testing must then always be done with n_test elements to get p-values. 
"""<line_sep>x=x.detach()<if_stmt>n_test<is><none><block_start>self.base_outputs=x<block_end><else_stmt><block_start>torchdrift.utils.check(0<l>n_test<l>x.size(0) "n_test must be strictly between 0 and the number of samples")<line_sep>self.n_test=n_test<line_sep>self.base_outputs=x[:-n_test]<line_sep>n_ref=x.size(0)-n_test<line_sep>with_distant_point=self.fraction_to_match<l>1.0<line_sep>scores=[]<for_stmt>i range(self.n_perm)<block_start>slicing=torch.randperm(x.size(0))<line_sep>scores.append(self.partial_mmd(x[slicing[:-n_test]] x[slicing[-n_test:]] fraction_to_match=self.fraction_to_match kernel=self.kernel))<block_end>scores=torch.stack(scores)<line_sep>self.scores=scores<line_sep>self.dist_min=scores.min().double()<line_sep>mean=scores.mean()-self.dist_min<line_sep>var=scores.var().double()<line_sep>self.dist_alpha=mean<power>2/var<line_sep>self.dist_beta=mean/var<line_sep>self.scores=scores<block_end><return>x<block_end><def_stmt>predict_shift_from_features self base_outputs:torch.Tensor outputs:torch.Tensor compute_score:bool compute_p_value:bool individual_samples:bool=<false> <block_start>torchdrift.utils.check(<not>individual_samples "Individual samples not supported by Wasserstein distance detector")<if_stmt><not>compute_p_value<block_start>ood_score=self.partial_mmd(base_outputs outputs fraction_to_match=self.fraction_to_match n_perm=<none> )<line_sep>p_value=<none><block_end><else_stmt><block_start>torchdrift.utils.check(self.n_test<is><not><none> "Bootstrapping within partial MMD is not implemented, use bootstrap during fit" error_class=NotImplementedError)<line_sep>torchdrift.utils.check(self.n_test<eq>outputs.size(0) "number of test samples does not match calibrated number")<line_sep>ood_score=self.partial_mmd(base_outputs outputs fraction_to_match=self.fraction_to_match n_perm=<none>)<line_sep>p_value=torch.igammac(self.dist_alpha self.dist_beta<times>(ood_score-self.dist_min).clamp_(min=0))# needs PyTorch >=1.8 # z = (ood_score - self.dist_mean) / self.dist_std # p_value = 0.5 * torch.erfc(z * (0.5**0.5)) # p_value = (self.scores > ood_score).float().mean() <block_end><return>ood_score p_value<block_end><block_end>
<import_stmt>sqlite3<line_sep># This script generates the spatialite.db file in our tests directory. <def_stmt>generate_it filename<block_start>conn=sqlite3.connect(filename)<line_sep># Load the spatialite extension: conn.enable_load_extension(<true>)<line_sep>conn.load_extension("/usr/local/lib/mod_spatialite.dylib")<line_sep>conn.execute("select InitSpatialMetadata(1)")<line_sep>conn.executescript("create table museums (name text)")<line_sep>conn.execute("SELECT AddGeometryColumn('museums', 'point_geom', 4326, 'POINT', 2);")<line_sep># At this point it is around 5MB - we can shrink it dramatically by doing this conn.execute("delete from spatial_ref_sys")<line_sep>conn.execute("delete from spatial_ref_sys_aux")<line_sep>conn.commit()<line_sep>conn.execute("vacuum")<line_sep>conn.close()<block_end><if_stmt>__name__<eq>"__main__"<block_start>generate_it("spatialite.db")<block_end>
<def_stmt>test_slice_bounds s# End out of range <block_start><assert_stmt>s[0:100]<eq>s<assert_stmt>s[0:-100]<eq>''<line_sep># Start out of range <assert_stmt>s[100:1]<eq>''<line_sep># Out of range both sides # This is the behaviour in cpython # assert s[-100:100] == s <block_end><def_stmt>expect_index_error s index<block_start><try_stmt><block_start>s[index]<block_end><except_stmt>IndexError<block_start><pass><block_end><else_stmt><block_start><assert_stmt><false><block_end><block_end>unicode_str="∀∂"<assert_stmt>unicode_str[0]<eq>"∀"<assert_stmt>unicode_str[1]<eq>"∂"<assert_stmt>unicode_str[-1]<eq>"∂"<line_sep>test_slice_bounds(unicode_str)<line_sep>expect_index_error(unicode_str 100)<line_sep>expect_index_error(unicode_str -100)<line_sep>ascii_str="hello world"<line_sep>test_slice_bounds(ascii_str)<assert_stmt>ascii_str[0]<eq>"h"<assert_stmt>ascii_str[1]<eq>"e"<assert_stmt>ascii_str[-1]<eq>"d"<line_sep># test unicode indexing, made more tricky by hebrew being a right-to-left language hebrew_text="בְּרֵאשִׁית, בָּרָא אֱלֹהִים, אֵת הַשָּׁמַיִם, וְאֵת הָאָרֶץ"<assert_stmt>len(hebrew_text)<eq>60<assert_stmt>len(hebrew_text[:])<eq>60<assert_stmt>hebrew_text[0]<eq>'ב'<assert_stmt>hebrew_text[1]<eq>'ְ'<assert_stmt>hebrew_text[2]<eq>'ּ'<assert_stmt>hebrew_text[3]<eq>'ר'<assert_stmt>hebrew_text[4]<eq>'ֵ'<assert_stmt>hebrew_text[5]<eq>'א'<assert_stmt>hebrew_text[6]<eq>'ש'<assert_stmt>hebrew_text[5:10]<eq>'אשִׁי'<assert_stmt>len(hebrew_text[5:10])<eq>5<assert_stmt>hebrew_text[-20:50]<eq>'מַיִם, וְא'<assert_stmt>len(hebrew_text[-20:50])<eq>10<assert_stmt>hebrew_text[:-30:1]<eq>'בְּרֵאשִׁית, בָּרָא אֱלֹהִים, '<assert_stmt>len(hebrew_text[:-30:1])<eq>30<assert_stmt>hebrew_text[10:-30]<eq>'ת, בָּרָא אֱלֹהִים, '<assert_stmt>len(hebrew_text[10:-30])<eq>20<assert_stmt>hebrew_text[10:30:3]<eq>'תבר לִ,'<assert_stmt>len(hebrew_text[10:30:3])<eq>7<assert_stmt>hebrew_text[10:30:-3]<eq>''<assert_stmt>hebrew_text[30:10:-3]<eq>'אםהֱאּ '<assert_stmt>len(hebrew_text[30:10:-3])<eq>7<assert_stmt>hebrew_text[30:10:-1]<eq>'א ,םיִהֹלֱא אָרָּב ,'<assert_stmt>len(hebrew_text[30:10:-1])<eq>20<line_sep>
# Copyright 2021 The BladeDISC Authors. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <class_stmt>UnionSet<block_start><def_stmt>__init__ self num_elems:int<block_start>self._num_elems=num_elems<line_sep>self._num_sets=num_elems<line_sep>self._group_id=dict([(g g)<for>g range(0 num_elems)])<block_end><def_stmt>same_group self x:int y:int<block_start>pid_x=self.find(x)<line_sep>pid_y=self.find(y)<if_stmt>(pid_x<eq>pid_y)<block_start><return><true><block_end><return><false><block_end><def_stmt>find self x:int<block_start><assert_stmt>(x<l>self._num_elems)<assert_stmt>(x<in>self._group_id)<line_sep>pid=self._group_id[x]<if_stmt>(pid<ne>self._group_id[pid])<block_start>pid=self.find(pid)<block_end># path compression self._group_id[x]=pid<line_sep><return>pid<block_end><def_stmt>num_sets self<block_start><return>self._num_sets<block_end><def_stmt>union self x:int y:int<block_start>pid_x=self.find(x)<line_sep>pid_y=self.find(y)<if_stmt>(pid_x<eq>pid_y)<block_start><return><block_end>self._num_sets<augsub>1<line_sep>self._group_id[pid_y]=pid_x<block_end><def_stmt>get_groups self<block_start>groups_dict=dict()<for_stmt>k range(0 self._num_elems)<block_start>pid=self.find(k)<if_stmt>pid<not><in>groups_dict<block_start>groups_dict[pid]=list()<block_end>groups_dict[pid].append(k)<block_end>keys=sorted(groups_dict.keys())<line_sep>groups=list()<for_stmt>k keys<block_start><assert_stmt>(len(groups_dict[k])<g>0)<line_sep>groups.append(groups_dict[k])<block_end><assert_stmt>(self._num_sets<eq>len(groups))<line_sep><return>groups<block_end><block_end>
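# Hedged usage sketch (illustrative only, not part of the original file): merge a few pairs out of five elements and inspect the resulting partition; num_sets() should report 2 and get_groups() should return [[0, 1, 2], [3, 4]]. 
<if_stmt>__name__<eq>"__main__"<block_start>us=UnionSet(5)<line_sep>us.union(0 1)<line_sep>us.union(1 2)<line_sep>us.union(3 4)<line_sep>print(us.num_sets())<line_sep>print(us.get_groups())<block_end>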
# MIT License # Copyright (c) 2017 <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. <import_stmt>sys<if_stmt>"ICMP.py"<in>sys.argv[0]<block_start>print("[-] Instead of poking around just try: python xfltreat.py --help")<line_sep>sys.exit(-1)<block_end><import_stmt>socket<import_stmt>time<import_stmt>select<import_stmt>os<import_stmt>struct<import_stmt>threading<import_stmt>random<import_stmt>subprocess<line_sep>#local files <import_stmt>Stateless_module<import_stmt>encryption<import_stmt>client<import_stmt>common<import_from_stmt>support.icmp_proto ICMP_Proto<import_from_stmt>support.icmp_proto ICMP_Client<class_stmt>ICMP(Stateless_module.Stateless_module)<block_start>module_name="ICMP"<line_sep>module_configname="ICMP"<line_sep>module_description="""ICMP type 8+0 module. Sends ping requests and responses. Just an ordinary ping tunnel."""<line_sep>module_os_support=common.OS_LINUX|common.OS_MACOSX|common.OS_WINDOWS|common.OS_FREEBSD<def_stmt>__init__ self<block_start>super(ICMP self).__init__()<line_sep>self.icmp=ICMP_Proto()<line_sep>self.ICMP_sequence=0<line_sep># identifier lottery self.ICMP_identifier=int(random.random()<times>65535)<line_sep># serverport lottery, not like it matters self.ICMP_fake_serverport=int(random.random()<times>65535)<line_sep># prefix to make it easier to detect xfl packets self.ICMP_prefix="XFL"<line_sep>self.timeout=2.0<line_sep># if the recv-sent>threshold: self.TRACKING_THRESHOLD=50<line_sep># then we cut back the difference with adjust: self.TRACKING_ADJUST=20<line_sep><return><block_end><def_stmt>init_client self control_message additional_data<block_start>addr=additional_data[0]<line_sep>identifier=additional_data[1]<line_sep>sequence=additional_data[2]<line_sep>client_local=ICMP_Client()<line_sep>client_local.set_ICMP_received_identifier(identifier)<line_sep>client_local.set_ICMP_received_sequence(sequence)<line_sep>client_local.set_ICMP_sent_identifier(identifier)<line_sep>client_local.set_ICMP_sent_sequence(sequence)<line_sep>client_private_ip=control_message[0:4]<line_sep>client_public_source_ip=socket.inet_aton(addr[0])<line_sep>client_public_source_port=addr[1]<line_sep># If this private IP is already used, the server removes that client. 
# For example: client reconnect on connection reset, duplicated configs # and yes, this can be used to kick somebody off the tunnel # close client related pipes <for_stmt>c self.clients<block_start><if_stmt>c.get_private_ip_addr()<eq>client_private_ip<block_start>save_to_close=c<line_sep>self.clients.remove(c)<if_stmt>c.get_pipe_r()<in>self.rlist<block_start>self.rlist.remove(c.get_pipe_r())<block_end><block_end><block_end>found=<false><for_stmt>c self.packetselector.get_clients()<block_start><if_stmt>c.get_private_ip_addr()<eq>client_private_ip<block_start>found=<true><line_sep>self.packetselector.delete_client(c)<block_end><block_end># If client was created but not added to the PacketSelector, then the # pipes still need to be closed. This could happen when the authenti- # cation fails or gets interrupted. <if_stmt><not>found<block_start><if_stmt>self.os_type<eq>common.OS_WINDOWS<block_start><import_stmt>win32file<try_stmt><block_start>win32file.CloseHandle(save_to_close.get_pipe_r())<line_sep>win32file.CloseHandle(save_to_close.get_pipe_w())<block_end><except_stmt><block_start><pass><block_end><block_end><else_stmt><block_start><try_stmt><block_start>save_to_close.get_pipe_r_fd().close()<line_sep>save_to_close.get_pipe_w_fd().close()<block_end><except_stmt><block_start><pass><block_end><block_end><block_end># creating new pipes for the client pipe_r,pipe_w=os.pipe()<line_sep>client_local.set_pipes_fdnum(pipe_r pipe_w)<line_sep>client_local.set_pipes_fd(os.fdopen(pipe_r "r") os.fdopen(pipe_w "w"))<line_sep># set connection related things and authenticated to True client_local.set_public_ip_addr(client_public_source_ip)<line_sep>client_local.set_public_src_port(client_public_source_port)<line_sep>client_local.set_private_ip_addr(client_private_ip)<line_sep>client_local.get_encryption().set_module(self.encryption.get_module())<line_sep>self.encryption=client_local.get_encryption()<if_stmt>self.encryption.get_module().get_step_count()# add encryption steps <block_start>self.merge_cmh(self.encryption.get_module().get_cmh_struct())<block_end><if_stmt>self.authentication.get_step_count()# add authentication steps <block_start>self.merge_cmh(self.authentication.get_cmh_struct())<block_end>client_local.set_initiated(<true>)<line_sep>self.clients.append(client_local)<line_sep><return><block_end><def_stmt>lookup_client_pub self additional_data<block_start>addr=additional_data[0]<line_sep>identifier=additional_data[1]<line_sep>client_public_ip=socket.inet_aton(addr[0])<for_stmt>c self.clients<block_start><if_stmt>(c.get_public_ip_addr()<eq>client_public_ip)<and>(c.get_ICMP_received_identifier()<eq>identifier)<block_start><return>c<block_end><block_end><return><none><block_end><def_stmt>post_authentication_server self control_message additional_data<block_start>addr=additional_data[0]<line_sep>identifier=additional_data[1]<line_sep>c=self.lookup_client_pub((addr identifier))<if_stmt>c.get_initiated()<block_start>c.set_authenticated(<true>)<line_sep>self.packetselector.add_client(c)<if_stmt>c.get_pipe_r()<not><in>self.rlist<block_start>self.rlist.append(c.get_pipe_r())<block_end><return><true><block_end><return><false><block_end><def_stmt>remove_initiated_client self control_message additional_data<block_start>addr=additional_data[0]<line_sep>identifier=additional_data[1]<line_sep>c=self.lookup_client_pub((addr 
identifier))<if_stmt>c<block_start>self.packetselector.delete_client(c)<if_stmt>c.get_authenticated()<block_start>self.rlist.remove(c.get_pipe_r())<block_end>self.clients.remove(c)<block_end><return><block_end><def_stmt>communication_initialization self<block_start>self.clients=[]<if_stmt>self.serverorclient<block_start><if_stmt>self.os_type<eq>common.OS_LINUX<block_start>ps=subprocess.Popen(["cat" "/proc/sys/net/ipv4/icmp_echo_ignore_all"] stdout=subprocess.PIPE stderr=subprocess.PIPE)<line_sep>(stdout stderr)=ps.communicate()<if_stmt>stderr<block_start>common.internal_print("Error: could not read icmp_echo_ignore_all: {0}".format(stderr) -1)<line_sep>sys.exit(-1)<block_end>self.orig_ieia_value=stdout[0:1]<line_sep>os.system("echo 1 > /proc/sys/net/ipv4/icmp_echo_ignore_all")<block_end><block_end><if_stmt>self.serverorclient<block_start>self.ICMP_send=self.icmp.ICMP_ECHO_RESPONSE<block_end><else_stmt><block_start>self.ICMP_send=self.icmp.ICMP_ECHO_REQUEST<block_end><return><block_end><def_stmt>modify_additional_data self additional_data serverorclient<block_start><if_stmt>serverorclient<block_start>c=self.lookup_client_pub(additional_data)<if_stmt>c<block_start>c.set_ICMP_sent_sequence(additional_data[2])<block_end><return>additional_data<block_end><else_stmt># increment sequence in additional data <block_start>self.ICMP_sequence<augadd>1<line_sep><return>(additional_data[0] additional_data[1] self.ICMP_sequence additional_data[3])<block_end><block_end># check request: generating a challenge and sending it to the server # in case the answer is what is expected, the target is a valid server <def_stmt>do_check self<block_start>message,self.check_result=self.checks.check_default_generate_challenge()<line_sep>self.send(common.CONTROL_CHANNEL_BYTE common.CONTROL_CHECK+message (self.server_tuple self.ICMP_identifier 0 0))<line_sep><return><block_end># start talking to the server # do authentication or encryption first <def_stmt>do_hello self# TODO: maybe change this later to push some more info, not just the # private IP <block_start>message=socket.inet_aton(self.config.get("Global" "clientip"))<line_sep>self.send(common.CONTROL_CHANNEL_BYTE common.CONTROL_INIT+message (self.server_tuple self.ICMP_identifier self.ICMP_sequence 0))<block_end># Polite signal towards the server to tell that the client is leaving # Can be spoofed? if there is no encryption. Who cares?
<def_stmt>do_logoff self<block_start>self.send(common.CONTROL_CHANNEL_BYTE common.CONTROL_LOGOFF (self.server_tuple self.ICMP_identifier self.ICMP_sequence 0))<line_sep><return><block_end><def_stmt>do_dummy_packet self identifier sequence<block_start>self.send(common.CONTROL_CHANNEL_BYTE common.CONTROL_DUMMY_PACKET (self.server_tuple identifier sequence 0))<line_sep><return><block_end><def_stmt>send self channel_type message additional_data<block_start>addr=additional_data[0]<line_sep>identifier=additional_data[1]<line_sep>sequence=additional_data[2]<line_sep>queue_length=additional_data[3]<if_stmt>queue_length<l>256<block_start>ql=chr(queue_length)<block_end><else_stmt><block_start>ql=chr(255)<block_end><if_stmt>channel_type<eq>common.CONTROL_CHANNEL_BYTE<block_start>transformed_message=self.transform(self.get_client_encryption(additional_data) ql+common.CONTROL_CHANNEL_BYTE+message 1)<block_end><else_stmt><block_start>transformed_message=self.transform(self.get_client_encryption(additional_data) ql+common.DATA_CHANNEL_BYTE+message 1)<block_end>common.internal_print("ICMP sent: {0} seq: {1} id: {2}".format(len(transformed_message) sequence identifier) 0 self.verbosity common.DEBUG)<line_sep>packet=self.icmp.create_packet(self.ICMP_send identifier sequence self.ICMP_prefix+struct.pack(">H" len(transformed_message))+transformed_message)<line_sep># WORKAROUND?! # Windows: It looks like when the buffer fills up the OS does not do # congestion control, instead throws an exception/returns with # WSAEWOULDBLOCK which means that we need to try it again later. # So we sleep 100ms and hope that the buffer has more space for us. # If it does then it sends the data, otherwise tries it in an infinite # loop... <while_stmt><true><block_start><try_stmt><block_start><return>self.comms_socket.sendto(packet addr)<block_end><except_stmt>socket.error<as>se<block_start><if_stmt>se.args[0]<eq>10035# WSAEWOULDBLOCK <block_start>time.sleep(0.1)<line_sep><pass><block_end><else_stmt><block_start><raise><block_end><block_end><block_end><block_end><def_stmt>recv self<block_start>message,addr=self.comms_socket.recvfrom(1508)<line_sep>identifier=struct.unpack("<H" message[24:26])[0]<line_sep>sequence=struct.unpack(">H" message[26:28])[0]<if_stmt>message[28:28+len(self.ICMP_prefix)]<ne>self.ICMP_prefix<block_start><return>("" <none> <none> <none> <none>)<block_end>message=message[28+len(self.ICMP_prefix):]<line_sep>length=struct.unpack(">H" message[0:2])[0]<if_stmt>(length+2<ne>len(message))<block_start>common.internal_print("Error length mismatch {0} {1}".format(length len(message)) -1)<line_sep><return>("" <none> <none> <none> <none>)<block_end>message=self.transform(self.get_client_encryption((addr identifier 0 0)) message[2:length+2] 0)<line_sep>queue_length=struct.unpack(">B" message[0:1])[0]<line_sep>common.internal_print("ICMP read: {0} seq: {1} id: {2}".format(length sequence identifier) 0 self.verbosity common.DEBUG)<line_sep><return>message[1:] addr identifier sequence queue_length<block_end><def_stmt>communication_win self is_check<block_start><import_stmt>win32event<import_stmt>win32file<import_stmt>win32api<import_stmt>pywintypes<import_stmt>winerror<line_sep># event for the socket hEvent_sock=win32event.CreateEvent(<none> 0 0 <none>)<line_sep>win32file.WSAEventSelect(self.comms_socket hEvent_sock win32file.FD_READ)<line_sep># descriptor list self.rlist=[self.comms_socket]<line_sep># overlapped list self.olist=[0]<line_sep># event list self.elist=[hEvent_sock]<line_sep># message buffer list
self.mlist=[0]<line_sep># id of the read object - put in this if it was read self.ulist=[]<if_stmt><not>self.serverorclient<and>self.tunnel# client mode # objects created for the tunnel and put in the corresponding # lists <block_start>hEvent_pipe=win32event.CreateEvent(<none> 0 0 <none>)# for reading from the pipe overlapped_pipe=pywintypes.OVERLAPPED()<line_sep>overlapped_pipe.hEvent=hEvent_pipe<line_sep>message_buffer=win32file.AllocateReadBuffer(4096)<line_sep>self.rlist.append(self.tunnel)<line_sep>self.olist.append(overlapped_pipe)<line_sep>self.elist.append(hEvent_pipe)<line_sep>self.mlist.append(message_buffer)<line_sep>self.ulist.append(1)<block_end><while_stmt><not>self._stop<block_start><try_stmt><block_start><if_stmt><not>self.tunnel# check or server mode without client only with socket #message, addr = self.comms_socket.recvfrom(1508) <block_start>rc=win32event.WaitForSingleObject(hEvent_sock int(self.timeout<times>1000))<if_stmt>rc<eq>winerror.WAIT_TIMEOUT# timed out, just rerun and wait <block_start><continue><block_end><block_end><else_stmt><block_start><if_stmt>self.ulist# there is somebody waiting to be read <block_start><for_stmt>idx self.ulist# issuing ReadFile on all not yet read mailslots/tunnel <block_start>hr,_=win32file.ReadFile(self.rlist[idx] self.mlist[idx] self.olist[idx])<if_stmt>(hr<ne>0)<and>(hr<ne>winerror.ERROR_IO_PENDING)<block_start>common.internal_print("ReadFile failed: {0}".format(hr) -1)<line_sep><raise><block_end><block_end>self.ulist=[]<block_end># waiting to get some data somewhere rc=win32event.WaitForMultipleObjects(self.elist 0 int(self.timeout<times>1000))<if_stmt>rc<eq>winerror.WAIT_TIMEOUT# timed out, just rerun and wait <block_start><continue><block_end><block_end><if_stmt>rc<l>0x80# STATUS_ABANDONED_WAIT_0 <block_start><if_stmt>rc<g>0# the tunnel or one of the mailslots got signalled <block_start>self.ulist.append(rc)<if_stmt>(self.olist[rc].InternalHigh<l>4)<or>(self.mlist[rc][0:1]<ne>"\x45")#Only care about IPv4 <block_start><continue><block_end>readytogo=self.mlist[rc][0:self.olist[rc].InternalHigh]<if_stmt>self.serverorclient<block_start>c=self.lookup_client_priv(readytogo)<if_stmt>c# if the difference between the received and sent sequences is too big # some routers/firewalls just drop older sequences. If it gets # too big, we just drop the older ones and use the latest X packets # this helps stability. <block_start><if_stmt>(c.get_ICMP_received_sequence()-c.get_ICMP_sent_sequence())<ge>self.TRACKING_THRESHOLD<block_start>c.set_ICMP_sent_sequence(c.get_ICMP_received_sequence()-self.TRACKING_ADJUST)<block_end># get client related values: identifier and sequence number identifier=c.get_ICMP_sent_identifier()<line_sep>sequence=c.get_ICMP_sent_sequence()<line_sep># queueing every packet first c.queue_put(readytogo)<line_sep># are there any packets to answer? <if_stmt>(c.get_ICMP_received_sequence()-sequence)<eq>0<block_start><continue><block_end><else_stmt><block_start>request_num=0<line_sep># if fewer packets can be answered than we have in the queue # then we cap the outgoing packet number <if_stmt>(c.get_ICMP_received_sequence()-sequence)<l>(c.queue_length())<block_start>number_to_get=(c.get_ICMP_received_sequence()-sequence)<block_end><else_stmt># send all packets from the queue <block_start>number_to_get=c.queue_length()<block_end><for_stmt>i range(0 number_to_get)# get first packet <block_start>readytogo=c.queue_get()<line_sep># is it the last one we are sending now?
<if_stmt>i<eq>(number_to_get-1)# if it is the last one and there is more in the queue # then we ask for dummy packets <block_start>request_num=c.queue_length()<block_end># go packets go! self.send(common.DATA_CHANNEL_BYTE readytogo ((socket.inet_ntoa(c.get_public_ip_addr()) c.get_public_src_port()) identifier sequence+i+1 request_num))<block_end>sequence=(sequence+i+1)%65536<line_sep>c.set_ICMP_sent_sequence(sequence)<block_end><block_end><else_stmt># there is no client with that IP <block_start>common.internal_print("Client not found, strange?!" 0 self.verbosity common.DEBUG)<line_sep><continue><block_end><block_end><else_stmt><block_start><if_stmt>self.authenticated# whatever we have from the tunnel, just encapsulate it # and send it out <block_start>self.ICMP_sequence=(self.ICMP_sequence+1)%65536<line_sep>self.send(common.DATA_CHANNEL_BYTE readytogo (self.server_tuple self.ICMP_identifier self.ICMP_sequence 0))<line_sep>#?? <block_end><else_stmt><block_start>common.internal_print("Spoofed packets, strange?!" 0 self.verbosity common.DEBUG)<line_sep><continue><block_end><block_end><block_end><if_stmt>rc<eq>0# socket got signalled <block_start>message,addr,identifier,sequence,queue_length=self.recv()<if_stmt>len(message)<eq>0<block_start><continue><block_end>c=<none><if_stmt>self.serverorclient<block_start>self.authenticated=<false><line_sep>c=self.lookup_client_pub((addr 0))<if_stmt>c<block_start>c.set_ICMP_received_identifier(identifier)<line_sep># packets do not always arrive in order # if higher sequence arrived already, then we # do not modify # 16bit integer MAX could be a bit tricky, a # threshold had to be introduced to make it # fail safe. Hacky but should work. ICMP_THRESHOLD=100<if_stmt>(sequence<g>c.get_ICMP_received_sequence())<or>((sequence<l>ICMP_THRESHOLD)<and>((sequence+65536)<g>c.get_ICMP_received_sequence())<and>(c.get_ICMP_received_sequence()<g>ICMP_THRESHOLD))<block_start>c.set_ICMP_received_sequence(sequence)<block_end><block_end><block_end><else_stmt><block_start><if_stmt>queue_length<block_start>common.internal_print("sending {0} dummy packets".format(queue_length) 0 self.verbosity common.DEBUG)<for_stmt>i range(queue_length+10)<block_start>self.ICMP_sequence=(self.ICMP_sequence+1)%65536<line_sep>self.do_dummy_packet(self.ICMP_identifier self.ICMP_sequence)<block_end><block_end><block_end><if_stmt>common.is_control_channel(message[0:1])<block_start><if_stmt>self.controlchannel.handle_control_messages(self message[len(common.CONTROL_CHANNEL_BYTE):] (addr identifier sequence 0))<block_start><continue><block_end><else_stmt><block_start>self.stop()<line_sep><break><block_end><block_end><if_stmt>c<block_start>self.authenticated=c.get_authenticated()<block_end><if_stmt>self.authenticated<block_start><try_stmt><block_start>self.packet_writer(message[len(common.CONTROL_CHANNEL_BYTE):])<block_end><except_stmt>OSError<as>e<block_start>print(e)<block_end><block_end><block_end><block_end><block_end><except_stmt>win32api.error<as>e<block_start>common.internal_print("ICMP Exception: {0}".format(e) -1)<block_end><except_stmt>socket.error<as>se<block_start><if_stmt>se.args[0]<eq>10054# port is unreachable <block_start>common.internal_print("Server's port is unreachable: {0}".format(se) -1)<line_sep>self._stop=<true><block_end><block_end><block_end><return><true><block_end><def_stmt>communication_unix self is_check<block_start>sequence=0<line_sep>identifier=0<line_sep>self.rlist=[self.comms_socket]<if_stmt><not>self.serverorclient<and>self.tunnel<block_start>self.rlist=[self.tunnel
self.comms_socket]<block_end>wlist=[]<line_sep>xlist=[]<while_stmt><not>self._stop<block_start><try_stmt><block_start>readable,writable,exceptional=select.select(self.rlist wlist xlist self.timeout)<block_end><except_stmt>select.error<as>e<block_start>common.internal_print("select.error: {0}".format(e) -1)<line_sep><break><block_end><try_stmt><block_start><if_stmt><not>readable<block_start><if_stmt>is_check<block_start><raise>socket.timeout<block_end><if_stmt><not>self.serverorclient<block_start><if_stmt>self.authenticated<block_start>self.ICMP_sequence=(self.ICMP_sequence+1)%65536<line_sep>self.do_dummy_packet(self.ICMP_identifier self.ICMP_sequence)<line_sep>common.internal_print("Keep alive sent" 0 self.verbosity common.DEBUG)<block_end><block_end><continue><block_end><for_stmt>s readable<block_start><if_stmt>(s<in>self.rlist)<and><not>(s<is>self.comms_socket)<block_start>message=self.packet_reader(s <true> self.serverorclient)<while_stmt><true><block_start><if_stmt>(len(message)<l>4)<or>(message[0:1]<ne>"\x45")#Only care about IPv4 <block_start><break><block_end>packetlen=struct.unpack(">H" message[2:4])[0]# IP Total length <if_stmt>packetlen<g>len(message)<block_start>message<augadd>self.packet_reader(s <false> self.serverorclient)<block_end>readytogo=message[0:packetlen]<line_sep>message=message[packetlen:]<if_stmt>self.serverorclient<block_start>c=self.lookup_client_priv(readytogo)<if_stmt>c# if the difference between the received and sent sequences is too big # some routers/firewalls just drop older sequences. If it gets # too big, we just drop the older ones and use the latest X packets # this helps stability. <block_start><if_stmt>(c.get_ICMP_received_sequence()-c.get_ICMP_sent_sequence())<ge>self.TRACKING_THRESHOLD<block_start>c.set_ICMP_sent_sequence(c.get_ICMP_received_sequence()-self.TRACKING_ADJUST)<block_end># get client related values: identifier and sequence number identifier=c.get_ICMP_sent_identifier()<line_sep>sequence=c.get_ICMP_sent_sequence()<line_sep># queueing every packet first c.queue_put(readytogo)<line_sep># are there any packets to answer? <if_stmt>(c.get_ICMP_received_sequence()-sequence)<eq>0<block_start><continue><block_end><else_stmt><block_start>request_num=0<line_sep># if fewer packets can be answered than we have in the queue # then we cap the outgoing packet number <if_stmt>(c.get_ICMP_received_sequence()-sequence)<l>(c.queue_length())<block_start>number_to_get=(c.get_ICMP_received_sequence()-sequence)<block_end><else_stmt># send all packets from the queue <block_start>number_to_get=c.queue_length()<block_end><for_stmt>i range(0 number_to_get)# get first packet <block_start>readytogo=c.queue_get()<line_sep># is it the last one we are sending now? <if_stmt>i<eq>(number_to_get-1)# if it is the last one and there is more in the queue # then we ask for dummy packets <block_start>request_num=c.queue_length()<block_end># go packets go! self.send(common.DATA_CHANNEL_BYTE readytogo ((socket.inet_ntoa(c.get_public_ip_addr()) c.get_public_src_port()) identifier sequence+i+1 request_num))<block_end>sequence=(sequence+i+1)%65536<line_sep>c.set_ICMP_sent_sequence(sequence)<block_end><block_end><else_stmt># there is no client with that IP <block_start>common.internal_print("Client not found, strange?!"
0 self.verbosity common.DEBUG)<line_sep><continue><block_end><block_end><else_stmt><block_start><if_stmt>self.authenticated# whatever we have from the tunnel, just encapsulate it # and send it out <block_start>self.ICMP_sequence=(self.ICMP_sequence+1)%65536<line_sep>self.send(common.DATA_CHANNEL_BYTE readytogo (self.server_tuple self.ICMP_identifier self.ICMP_sequence 0))<line_sep>#?? <block_end><else_stmt><block_start>common.internal_print("Spoofed packets, strange?!" 0 self.verbosity common.DEBUG)<line_sep><continue><block_end><block_end><block_end><block_end><if_stmt>s<is>self.comms_socket<block_start>message,addr,identifier,sequence,queue_length=self.recv()<if_stmt>len(message)<eq>0<block_start><continue><block_end>c=<none><if_stmt>self.serverorclient<block_start>self.authenticated=<false><line_sep>c=self.lookup_client_pub((addr identifier))<if_stmt>c<block_start>c.set_ICMP_received_identifier(identifier)<line_sep># packets do not always arrive in order # if higher sequence arrived already, then we # do not modify # 16bit integer MAX could be a bit tricky, a # threshold had to be introduced to make it # fail safe. Hacky but should work. ICMP_THRESHOLD=100<if_stmt>(sequence<g>c.get_ICMP_received_sequence())<or>((sequence<l>ICMP_THRESHOLD)<and>((sequence+65536)<g>c.get_ICMP_received_sequence())<and>(c.get_ICMP_received_sequence()<g>ICMP_THRESHOLD))<block_start>c.set_ICMP_received_sequence(sequence)<block_end><block_end><block_end><else_stmt><block_start><if_stmt>queue_length<block_start>common.internal_print("sending {0} dummy packets".format(queue_length) 0 self.verbosity common.DEBUG)<for_stmt>i range(queue_length+10)<block_start>self.ICMP_sequence=(self.ICMP_sequence+1)%65536<line_sep>self.do_dummy_packet(self.ICMP_identifier self.ICMP_sequence)<block_end><block_end><block_end><if_stmt>common.is_control_channel(message[0:1])<block_start><if_stmt>self.controlchannel.handle_control_messages(self message[len(common.CONTROL_CHANNEL_BYTE):] (addr identifier sequence 0))<block_start><continue><block_end><else_stmt><block_start>self.stop()<line_sep><break><block_end><block_end><if_stmt>c<block_start>self.authenticated=c.get_authenticated()<block_end><if_stmt>self.authenticated<block_start><try_stmt><block_start>self.packet_writer(message[len(common.CONTROL_CHANNEL_BYTE):])<block_end><except_stmt>OSError<as>e<block_start>print(e)<block_end><block_end><block_end><block_end><block_end><except_stmt>(socket.error OSError)<block_start><raise><if_stmt>self.serverorclient<block_start>self.comms_socket.close()<block_end><break><block_end><except_stmt><block_start>print("another error")<line_sep><raise><block_end><block_end><return><block_end><def_stmt>serve self<block_start>server_socket=<none><line_sep>self.serverorclient=1<try_stmt><block_start>common.internal_print("Starting module: {0} on {1}".format(self.get_module_name() self.config.get("Global" "serverbind")))<line_sep>server_socket=socket.socket(socket.AF_INET socket.SOCK_RAW socket.IPPROTO_ICMP)<if_stmt>(self.os_type<eq>common.OS_WINDOWS)<or>(self.os_type<eq>common.OS_MACOSX)<block_start>common.internal_print("This module can be run in client mode only on this operating system."
-1)<line_sep>self.cleanup()<line_sep><return><block_end>self.comms_socket=server_socket<line_sep>self.authenticated=<false><line_sep>self.communication_initialization()<line_sep>self.communication(<false>)<block_end><except_stmt>KeyboardInterrupt<block_start>self.cleanup()<line_sep><return><block_end>self.cleanup()<line_sep><return><block_end><def_stmt>connect self<block_start><try_stmt><block_start>common.internal_print("Starting client: {0}".format(self.get_module_name()))<line_sep>server_socket=socket.socket(socket.AF_INET socket.SOCK_RAW socket.IPPROTO_ICMP)<if_stmt>self.os_type<eq>common.OS_WINDOWS# this should give back the default route interface IP <block_start>default_host_ip=socket.gethostbyname(socket.gethostname())<line_sep>server_socket.bind((default_host_ip 0))<block_end>self.server_tuple=(self.config.get("Global" "remoteserverip") self.ICMP_fake_serverport)<line_sep>self.comms_socket=server_socket<line_sep>self.serverorclient=0<line_sep>self.authenticated=<false><line_sep>self.communication_initialization()<line_sep>self.do_hello()<line_sep>self.communication(<false>)<block_end><except_stmt>KeyboardInterrupt<block_start>self.do_logoff()<line_sep>self.cleanup()<line_sep><raise><block_end>self.cleanup()<line_sep><return><block_end><def_stmt>check self<block_start><try_stmt><block_start>common.internal_print("Checking module on server: {0}".format(self.get_module_name()))<line_sep>server_socket=socket.socket(socket.AF_INET socket.SOCK_RAW socket.IPPROTO_ICMP)<if_stmt>self.os_type<eq>common.OS_WINDOWS# this should give back the default route interface IP <block_start>default_host_ip=socket.gethostbyname(socket.gethostname())<line_sep>server_socket.bind((default_host_ip 0))<block_end>self.server_tuple=(self.config.get("Global" "remoteserverip") self.ICMP_fake_serverport)<line_sep>self.comms_socket=server_socket<line_sep>self.serverorclient=0<line_sep>self.authenticated=<false><line_sep>self.communication_initialization()<line_sep>self.do_check()<line_sep>self.communication(<true>)<block_end><except_stmt>KeyboardInterrupt<block_start>self.cleanup()<line_sep><raise><block_end><except_stmt>socket.timeout<block_start>common.internal_print("Checking failed: {0}".format(self.get_module_name()) -1)<block_end>self.cleanup()<line_sep><return><block_end><def_stmt>cleanup self<block_start>common.internal_print("Shutting down module: {0}".format(self.get_module_name()))<if_stmt>self.serverorclient<block_start><if_stmt>self.os_type<eq>common.OS_LINUX<block_start>os.system("echo {0} > /proc/sys/net/ipv4/icmp_echo_ignore_all".format(self.orig_ieia_value))#??? <block_end><block_end><try_stmt><block_start>self.comms_socket.close()<block_end><except_stmt><block_start><pass><block_end><try_stmt><block_start>os.close(self.tunnel)<block_end><except_stmt><block_start><pass><block_end><block_end><block_end>
<import_stmt>sqlite3<line_sep>ver=sqlite3.sqlite_version<line_sep>con=sqlite3.connect(':memory:' 1 sqlite3.PARSE_DECLTYPES <none>)<line_sep>cur=con.cursor()<line_sep>cur.execute('CREATE TABLE test (id INT, txt TEXT)')<line_sep>cur.execute('INSERT INTO test VALUES (?, ?)' (42 'wut'))<line_sep>cur.execute('SELECT * FROM test')<assert_stmt>(cur.fetchall()<eq>[(42 'wut')])<line_sep>cur.execute('DROP TABLE test')<line_sep>con.close()<line_sep>
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals<import_from_stmt>django.db models migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('main' '0005_auto_20141121_1053') ]<line_sep>operations=[migrations.AddField(model_name='host' name='staleness' field=models.PositiveIntegerField(default=0 verbose_name='staleness') preserve_default=<true> ) migrations.AddField(model_name='host' name='staleness_notification_timestamp' field=models.DateTimeField(null=<true> verbose_name='staleness notification time' blank=<true>) preserve_default=<true> ) ]<block_end>
<import_stmt>operator<import_stmt>pytest<import_from_stmt>...containers Tuple List<import_from_stmt>..bool_ Bool<import_from_stmt>..number Int<import_from_stmt>..string Str<line_sep>@pytest.mark.parametrize("other, result_type, op, reflected" [(Str("") Str operator.add <true>) (Int(1) Str operator.mul <false>) (Str("") Bool operator.lt <false>) (Str("") Bool operator.le <false>) (Str("") Bool operator.eq <false>) (Str("") Bool operator.ge <false>) (Str("") Bool operator.gt <false>) (Str("") Bool operator.ne <false>) ] )<def_stmt>test_supported_binary_methods other result_type op reflected<block_start><assert_stmt>isinstance(op(Str("") other) result_type)<if_stmt>reflected<block_start><assert_stmt>isinstance(op(other Str("")) result_type)<block_end><block_end><def_stmt>test_contains <block_start><with_stmt>pytest.raises(TypeError)<block_start>Str("")<in>Str("")<block_end><assert_stmt>isinstance(Str("").contains(Str("")) Bool)<block_end><def_stmt>test_reversed <block_start><assert_stmt>isinstance(reversed(Str("")) Str)<block_end><def_stmt>test_length <block_start><with_stmt>pytest.raises(TypeError)<block_start>len(Str(""))<block_end><assert_stmt>isinstance(Str("").length() Int)<block_end>@pytest.mark.parametrize("method, return_type, args" [# custom ("contains" Bool [""]) ("length" Int []) # from python ("capitalize" Str []) ("center" Str [1]) ("count" Int [""]) # ("decode", Str, []), # TODO need more types to implement # ("encode", Str, []), # TODO need more types to implement ("endswith" Bool [""]) ("expandtabs" Str []) ("find" Int [""]) ("format" Str [""]) ("__getitem__" Str [0]) ("__getitem__" Str [slice(0 1 0)]) ("isalnum" Bool []) ("isalpha" Bool []) ("isdigit" Bool []) ("islower" Bool []) ("isspace" Bool []) ("istitle" Bool []) ("isupper" Bool []) ("join" Str [("a" "b")]) ("ljust" Str [1]) ("lower" Str []) ("lstrip" Str []) ("partition" Tuple[Str Str Str] [""]) ("replace" Str ["" ""]) ("rfind" Int [""]) ("rjust" Str [1]) ("rpartition" Tuple[Str Str Str] [""]) ("rsplit" List[Str] [""]) ("rstrip" Str []) ("split" List[Str] [""]) ("splitlines" List[Str] []) ("startswith" Bool [""]) ("strip" Str []) ("swapcase" Str []) ("title" Str []) ("upper" Str []) ("zfill" Str [0]) ] )<def_stmt>test_has_methods method return_type args<block_start>s=Str("")<line_sep>out=getattr(s method)(*args)<assert_stmt>isinstance(out return_type)<block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>DQMOffline.Trigger.HLTMuonOfflineAnalyzer_cfi hltMuonOfflineAnalyzer<line_sep>barrelMuonParams=cms.PSet(d0Cut=cms.untracked.double(1000.0) z0Cut=cms.untracked.double(1000.0) recoCuts=cms.untracked.string("isStandAloneMuon && abs(eta) < 0.9") hltCuts=cms.untracked.string("abs(eta) < 0.9") )<line_sep>endcapMuonParams=cms.PSet(d0Cut=cms.untracked.double(1000.0) z0Cut=cms.untracked.double(1000.0) recoCuts=cms.untracked.string("isStandAloneMuon && abs(eta) > 1.4 && "<concat>"abs(eta) < 2.0") hltCuts=cms.untracked.string("abs(eta) > 1.4 && abs(eta) < 2.0") )<line_sep>allMuonParams=cms.PSet(d0Cut=cms.untracked.double(1000.0) z0Cut=cms.untracked.double(1000.0) recoCuts=cms.untracked.string("isStandAloneMuon && abs(eta) < 2.0") hltCuts=cms.untracked.string("abs(eta) < 2.0") )<line_sep>barrelAnalyzer=hltMuonOfflineAnalyzer.clone()<line_sep>barrelAnalyzer.destination="HLT/Muon/DistributionsBarrel"<line_sep>barrelAnalyzer.targetParams=barrelMuonParams<line_sep>barrelAnalyzer.probeParams=cms.PSet()<line_sep>endcapAnalyzer=hltMuonOfflineAnalyzer.clone()<line_sep>endcapAnalyzer.destination="HLT/Muon/DistributionsEndcap"<line_sep>endcapAnalyzer.targetParams=endcapMuonParams<line_sep>endcapAnalyzer.probeParams=cms.PSet()<line_sep>allAnalyzer=hltMuonOfflineAnalyzer.clone()<line_sep>allAnalyzer.destination="HLT/Muon/DistributionsAll"<line_sep>allAnalyzer.targetParams=allMuonParams<line_sep>allAnalyzer.probeParams=allMuonParams<line_sep>hltMuonOfflineAnalyzers=cms.Sequence(barrelAnalyzer<times>endcapAnalyzer<times>allAnalyzer)<line_sep>
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import unicode_literals<import_from_stmt>django_services.api DjangoServiceSerializer<import_from_stmt>.models Team AccountUser<class_stmt>TeamSerializer(DjangoServiceSerializer)<block_start><class_stmt>Meta<block_start>model=Team<block_end><block_end><class_stmt>UserSerializer(DjangoServiceSerializer)<block_start><class_stmt>Meta<block_start>model=AccountUser<block_end><block_end>
#This file is part of ElectricEye. #SPDX-License-Identifier: Apache-2.0 #Licensed to the Apache Software Foundation (ASF) under one #or more contributor license agreements. See the NOTICE file #distributed with this work for additional information #regarding copyright ownership. The ASF licenses this file #to you under the Apache License, Version 2.0 (the #"License"); you may not use this file except in compliance #with the License. You may obtain a copy of the License at #http://www.apache.org/licenses/LICENSE-2.0 #Unless required by applicable law or agreed to in writing, #software distributed under the License is distributed on an #"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #KIND, either express or implied. See the License for the #specific language governing permissions and limitations #under the License. <import_stmt>boto3<import_stmt>datetime<import_stmt>json<import_from_stmt>check_register CheckRegister<line_sep>registry=CheckRegister()<line_sep>imagebuilder=boto3.client("imagebuilder")<line_sep>@registry.register_check("imagebuilder")<def_stmt>imagebuilder_pipeline_tests_enabled_check cache:dict awsAccountId:str awsRegion:str awsPartition:str<arrow>dict<block_start>"""[ImageBuilder.1] Image pipeline tests should be enabled"""<line_sep>pipelines=imagebuilder.list_image_pipelines()<line_sep>pipeline_list=pipelines["imagePipelineList"]<line_sep>iso8601Time=datetime.datetime.now(datetime.timezone.utc).isoformat()<for_stmt>arn pipeline_list<block_start>pipelineArn=arn["arn"]<line_sep>pipeline_name=arn["name"]<line_sep>image_pipelines=imagebuilder.get_image_pipeline(imagePipelineArn=pipelineArn)<line_sep>image_test_config=image_pipelines["imagePipeline"]["imageTestsConfiguration"]<line_sep>image_test_enabled=image_test_config["imageTestsEnabled"]<if_stmt>image_test_enabled<eq><true><block_start>finding={"SchemaVersion":"2018-10-08" "Id":pipelineArn+"/imagebuilder-pipeline-tests-enabled-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":pipelineArn "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices" "Effects/Data Exposure" ] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"INFORMATIONAL"} "Confidence":99 "Title":"[ImageBuilder.1] Image pipeline tests should be enabled" "Description":"Image pipeline "+pipeline_name+" has tests enabled." "Remediation":{"Recommendation":{"Text":"For more information on EC2 Image Builder Security and enabling image testing refer to the Best Practices section of the Amazon EC2 Image Builder Developer Guide." 
"Url":"https://docs.aws.amazon.com/imagebuilder/latest/userguide/security-best-practices.html" }} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsImageBuilderPipeline" "Id":pipelineArn "Partition":awsPartition "Region":awsRegion "Details":{"AwsImageBuilderPipeline":{"PipelineName":pipeline_name}} }] "Compliance":{"Status":"PASSED" "RelatedRequirements":["NIST CSF ID.AM-2" "NIST SP 800-53 CM-8" "NIST SP 800-53 PM-5" "AICPA TSC CC3.2" "AICPA TSC CC6.1" "ISO 27001:2013 A.8.1.1" "ISO 27001:2013 A.8.1.2" "ISO 27001:2013 A.12.5.1" ] } "Workflow":{"Status":"RESOLVED"} "RecordState":"ARCHIVED" }<line_sep><yield>finding<block_end><else_stmt><block_start>finding={"SchemaVersion":"2018-10-08" "Id":pipelineArn+"/imagebuilder-pipeline-tests-enabled-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":pipelineArn "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices" "Effects/Data Exposure" ] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"MEDIUM"} "Confidence":99 "Title":"[ImageBuilder.1] Image pipeline tests should be enabled" "Description":"Image pipeline "+pipeline_name+" does not have tests enabled." "Remediation":{"Recommendation":{"Text":"For more information on EC2 Image Builder Security and enabling image testing refer to the Best Practices section of the Amazon EC2 Image Builder Developer Guide." "Url":"https://docs.aws.amazon.com/imagebuilder/latest/userguide/security-best-practices.html" }} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsImageBuilderPipeline" "Id":pipelineArn "Partition":awsPartition "Region":awsRegion "Details":{"AwsImageBuilderPipeline":{"PipelineName":pipeline_name}} }] "Compliance":{"Status":"FAILED" "RelatedRequirements":["NIST CSF ID.AM-2" "NIST SP 800-53 CM-8" "NIST SP 800-53 PM-5" "AICPA TSC CC3.2" "AICPA TSC CC6.1" "ISO 27001:2013 A.8.1.1" "ISO 27001:2013 A.8.1.2" "ISO 27001:2013 A.12.5.1" ] } "Workflow":{"Status":"NEW"} "RecordState":"ACTIVE" }<line_sep><yield>finding<block_end><block_end><block_end>@registry.register_check("imagebuilder")<def_stmt>imagebuilder_ebs_encryption_check cache:dict awsAccountId:str awsRegion:str awsPartition:str<arrow>dict<block_start>"""[ImageBuilder.2] Image recipes should encrypt EBS volumes"""<line_sep>recipes=imagebuilder.list_image_recipes()<line_sep>recipes_list=recipes["imageRecipeSummaryList"]<line_sep>iso8601Time=datetime.datetime.now(datetime.timezone.utc).isoformat()<for_stmt>details recipes_list<block_start>recipeArn=details["arn"]<line_sep>recipe_name=details["name"]<line_sep>recipe=imagebuilder.get_image_recipe(imageRecipeArn=recipeArn)<line_sep>device_mapping=recipe["imageRecipe"]["blockDeviceMappings"]<line_sep>list1=device_mapping[0]<line_sep>ebs=list1["ebs"]<line_sep>ebs_encryption=ebs["encrypted"]<if_stmt>ebs_encryption<eq><true><block_start>finding={"SchemaVersion":"2018-10-08" "Id":recipeArn+"/imagebuilder-ebs-encryption-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":recipeArn "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices" "Effects/Data Exposure" ] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"INFORMATIONAL"} "Confidence":99 "Title":"[ImageBuilder.2] Image recipes should encrypt EBS volumes" "Description":"Image recipe 
"+recipe_name+" has EBS encrypted." "Remediation":{"Recommendation":{"Text":"For more information on EC2 Image Builder Security and EBS encyption refer to the How EC2 Image Builder Works section of the Amazon EC2 Image Builder Developer Guide." "Url":"https://docs.aws.amazon.com/imagebuilder/latest/userguide/how-image-builder-works.html#image-builder-components" }} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsImageBuilderRecipe" "Id":recipeArn "Partition":awsPartition "Region":awsRegion "Details":{"AwsImageBuilderRecipe":{"RecipeName":recipe_name}} }] "Compliance":{"Status":"PASSED" "RelatedRequirements":["NIST CSF ID.AM-2" "NIST SP 800-53 CM-8" "NIST SP 800-53 PM-5" "AICPA TSC CC3.2" "AICPA TSC CC6.1" "ISO 27001:2013 A.8.1.1" "ISO 27001:2013 A.8.1.2" "ISO 27001:2013 A.12.5.1" ] } "Workflow":{"Status":"RESOLVED"} "RecordState":"ARCHIVED" }<line_sep><yield>finding<block_end><else_stmt><block_start>finding={"SchemaVersion":"2018-10-08" "Id":recipeArn+"/imagebuilder-ebs-encryption-check" "ProductArn":f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default" "GeneratorId":recipeArn "AwsAccountId":awsAccountId "Types":["Software and Configuration Checks/AWS Security Best Practices" "Effects/Data Exposure" ] "FirstObservedAt":iso8601Time "CreatedAt":iso8601Time "UpdatedAt":iso8601Time "Severity":{"Label":"MEDIUM"} "Confidence":99 "Title":"[ImageBuilder.2] Image recipes should encrypt EBS volumes" "Description":"Image recipe "+recipe_name+" does not have EBS encrypted." "Remediation":{"Recommendation":{"Text":"For more information on EC2 Image Builder Security and EBS encyption refer to the How EC2 Image Builder Works section of the Amazon EC2 Image Builder Developer Guide." "Url":"https://docs.aws.amazon.com/imagebuilder/latest/userguide/how-image-builder-works.html#image-builder-components" }} "ProductFields":{"Product Name":"ElectricEye"} "Resources":[{"Type":"AwsImageBuilderRecipe" "Id":recipeArn "Partition":awsPartition "Region":awsRegion "Details":{"AwsImageBuilderRecipe":{"RecipeName":recipe_name}} }] "Compliance":{"Status":"FAILED" "RelatedRequirements":["NIST CSF ID.AM-2" "NIST SP 800-53 CM-8" "NIST SP 800-53 PM-5" "AICPA TSC CC3.2" "AICPA TSC CC6.1" "ISO 27001:2013 A.8.1.1" "ISO 27001:2013 A.8.1.2" "ISO 27001:2013 A.12.5.1" ] } "Workflow":{"Status":"NEW"} "RecordState":"ACTIVE" }<line_sep><yield>finding<block_end><block_end><block_end>
# Python imports. <import_from_stmt>collections defaultdict<import_stmt>copy<line_sep># Other imports. <import_from_stmt>simple_rl.planning Planner<import_from_stmt>simple_rl.planning ValueIteration<import_from_stmt>simple_rl.tasks GridWorldMDP<import_from_stmt>simple_rl.planning.BoundedRTDPClass BoundedRTDP<class_stmt>MonotoneLowerBound(Planner)<block_start><def_stmt>__init__ self mdp name='MonotoneLowerBound'<block_start>relaxed_mdp=MonotoneLowerBound._construct_deterministic_relaxation_mdp(mdp)<line_sep>Planner.__init__(self relaxed_mdp name)<line_sep>self.vi=ValueIteration(relaxed_mdp)<line_sep>self.states=self.vi.get_states()<line_sep>self.vi._compute_matrix_from_trans_func()<line_sep>self.vi.run_vi()<line_sep>self.lower_values=self._construct_lower_values()<block_end>@staticmethod<def_stmt>_construct_deterministic_relaxation_mdp mdp<block_start>relaxed_mdp=copy.deepcopy(mdp)<line_sep>relaxed_mdp.set_slip_prob(0.0)<line_sep><return>relaxed_mdp<block_end><def_stmt>_construct_lower_values self<block_start>values=defaultdict()<for_stmt>state self.states<block_start>values[state]=self.vi.get_value(state)<block_end><return>values<block_end><block_end><class_stmt>MonotoneUpperBound(Planner)<block_start><def_stmt>__init__ self mdp name='MonotoneUpperBound'<block_start>Planner.__init__(self mdp name)<line_sep>self.vi=ValueIteration(mdp)<line_sep>self.states=self.vi.get_states()<line_sep>self.upper_values=self._construct_upper_values()<block_end><def_stmt>_construct_upper_values self<block_start>values=defaultdict()<for_stmt>state self.states<block_start>values[state]=1./(1.-self.gamma)<block_end><return>values<block_end><block_end><def_stmt>main <block_start>test_mdp=GridWorldMDP(width=6 height=6 goal_locs=[(6 6)] slip_prob=0.2)<line_sep>lower_value_function=MonotoneLowerBound(test_mdp).lower_values<line_sep>upper_value_function=MonotoneUpperBound(test_mdp).upper_values<line_sep>bounded_rtdp=BoundedRTDP(test_mdp lower_values_init=lower_value_function upper_values_init=upper_value_function)<line_sep>test_policy=bounded_rtdp.plan()<line_sep>print('Derived policy:\n{}'.format(test_policy))<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
# tests/test_provider_circonus-labs_circonus.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:13:59 UTC) <def_stmt>test_provider_import <block_start><import_stmt>terrascript.provider.circonus_labs.circonus<block_end><def_stmt>test_resource_import <block_start><import_from_stmt>terrascript.resource.circonus_labs.circonus circonus_check<import_from_stmt>terrascript.resource.circonus_labs.circonus circonus_contact_group<import_from_stmt>terrascript.resource.circonus_labs.circonus circonus_dashboard<import_from_stmt>terrascript.resource.circonus_labs.circonus circonus_graph<import_from_stmt>terrascript.resource.circonus_labs.circonus circonus_maintenance<import_from_stmt>terrascript.resource.circonus_labs.circonus circonus_metric<import_from_stmt>terrascript.resource.circonus_labs.circonus circonus_overlay_set<import_from_stmt>terrascript.resource.circonus_labs.circonus circonus_rule_set<import_from_stmt>terrascript.resource.circonus_labs.circonus circonus_rule_set_group<import_from_stmt>terrascript.resource.circonus_labs.circonus circonus_worksheet<block_end><def_stmt>test_datasource_import <block_start><import_from_stmt>terrascript.data.circonus_labs.circonus circonus_account<import_from_stmt>terrascript.data.circonus_labs.circonus circonus_collector<block_end># TODO: Shortcut imports without namespace for official and supported providers. # TODO: This has to be moved into a required_providers block. # def test_version_source(): # # import terrascript.provider.circonus_labs.circonus # # t = terrascript.provider.circonus_labs.circonus.circonus() # s = str(t) # # assert 'https://github.com/circonus-labs/terraform-provider-circonus' in s # assert '0.12.2' in s
""" Collection of functions the plugin can invoke. Most if not all should be non-blocking (@async) functions to keep the main UI thread from freezing. These functions should catch all unexpected exceptions so the plugin does not have to. Unexpected exceptions should return False. Expected exceptions should be caught by other modules this module uses. Log all unusual behavior. All @async functions have an optional callback parameter as the last argument. """<import_stmt>os<import_from_stmt>. logger<import_from_stmt>. settings<import_from_stmt>. http<import_from_stmt>. io<import_from_stmt>.async async<import_from_stmt>.theme Theme<line_sep>log=logger.get(__name__)<def_stmt>get_current_theme <block_start><return>settings.get_current_theme()<block_end><def_stmt>get_installed_themes <block_start>theme_filenames=os.listdir(settings.install_path())<line_sep>themes=[]<for_stmt>t theme_filenames<block_start><if_stmt>t.endswith('.tmTheme')<block_start>name=t.replace('.tmTheme' '')<line_sep>themes.append(Theme(name=name file_name=t))<block_end><block_end>themes.sort()<line_sep><return>themes<block_end>@async<def_stmt>fetch_repo <block_start>""" Get current theme archive in a new thread """<line_sep>archive=http.get(settings.repo_url())<line_sep>io.extract(archive settings.cache_path())<line_sep>themes_list=io.read_json(settings.themes_list_path())<line_sep>themes=[Theme.from_json(theme)<for>theme themes_list]<line_sep>themes={t.name:t<for>t themes}<line_sep><return>themes<block_end><def_stmt>_exists theme<block_start><if_stmt><not>os.path.exists(theme.cache_path.abs)<block_start>log.error('Path %s not found!' theme.cache_path.abs)<line_sep><return><false><block_end><return><true><block_end><def_stmt>preview_theme theme<block_start>log.debug('Previewing theme %s at %s' theme.name theme.cache_path.abs)<if_stmt><not>_exists(theme)<block_start><return><block_end>settings.set_theme(theme.cache_path.rel)<block_end><def_stmt>install_theme theme<block_start>log.debug('Installing theme %s to %s' theme.name theme.install_path.abs)<if_stmt><not>_exists(theme)<block_start><return><block_end>io.copy(theme.cache_path.abs theme.install_path.abs)<line_sep>settings.set_theme(theme.install_path.rel)<line_sep>settings.commit()<block_end><def_stmt>revert_theme path<block_start>log.debug('Reverting theme at path %s' path)<line_sep>settings.set_theme(path)<block_end><def_stmt>uninstall_theme theme<block_start>os.remove(theme.install_path.abs)<block_end>
<import_from_stmt>..view View<import_from_stmt>..layouts.grid GridLayout<class_stmt>GridView(View layout=GridLayout)<block_start>@property<def_stmt>grid self<arrow>GridLayout<block_start><assert_stmt>isinstance(self.layout GridLayout)<line_sep><return>self.layout<block_end><block_end>
# ReferenceFrame is assumed to come from SymPy's mechanics package; import added so this fragment can run <import_from_stmt>sympy.physics.mechanics ReferenceFrame<line_sep>upper_leg_frame=ReferenceFrame('U')<line_sep>torso_frame=ReferenceFrame('T')<line_sep>