# Copyright 2021 EMQ Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import traceback

from . import reg
from .connection import SinkChannel
from .symbol import SymbolRuntime, parse_context
from ..sink import Sink


class SinkRuntime(SymbolRuntime):

    def __init__(self, ctrl: dict, s: Sink):
        ctx = parse_context(ctrl)
        config = {}
        if 'config' in ctrl:
            config = ctrl['config']
        s.configure(config)
        ch = SinkChannel(ctrl['meta'])
        self.s = s
        self.ctx = ctx
        self.ch = ch
        self.running = False
        self.key = f"{ctrl['meta']['ruleId']}_{ctrl['meta']['opId']}" \
                   f"_{ctrl['meta']['instanceId']}_{ctrl['symbolName']}"

    def run(self):
        logging.info('start running sink')
        # noinspection PyBroadException
        try:
            self.s.open(self.ctx)
            self.running = True
            reg.setr(self.key, self)
            while True:
                msg = self.ch.recv()
                self.s.collect(self.ctx, msg)
        except Exception:
            # two occasions: normal stop will close socket to raise an error
            # OR stopped by unexpected error
            if self.running:
                logging.error(traceback.format_exc())
        finally:
            if self.running:
                self.stop()

    def stop(self):
        self.running = False
        # noinspection PyBroadException
        try:
            self.s.close(self.ctx)
            self.ch.close()
            reg.delete(self.key)
        except Exception:
            logging.error(traceback.format_exc())

    def is_running(self) -> bool:
        return self.running
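# A minimal sketch of the Sink contract this runtime drives, inferred from
# the calls above (configure once, open once, collect per message, close on
# stop); PrintSink and its 'prefix' option are hypothetical.
class PrintSink(Sink):

    def configure(self, config: dict):
        self.prefix = config.get('prefix', '')

    def open(self, ctx):
        pass

    def collect(self, ctx, msg):
        print(self.prefix, msg)

    def close(self, ctx):
        pass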
""" Copyright (c) 2017 Wind River Systems, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. """<import_from_stmt>abc ABCMeta abstractmethod<import_from_stmt>cryptodetector.exceptions InvalidMethodException<class_stmt>MethodFactory(ABCMeta)<block_start>"""Meta class creating a method class. Keeps track of all child classes that inherit from Method for later reference. """<def_stmt>__new__ mcs clsname bases dct<block_start><if_stmt><not>hasattr(MethodFactory "method_classes")<block_start>MethodFactory.method_classes=[]<block_end>method_class=super(MethodFactory mcs).__new__(mcs clsname bases dct)<if_stmt>bases<block_start><if_stmt><not>hasattr(method_class "method_id")<block_start><raise>InvalidMethodException("Method "+clsname+" requires "+"'method_id' attribute.")<block_end><if_stmt>method_class.method_id<in>[mc.method_id<for>mc MethodFactory.method_classes]<block_start><raise>InvalidMethodException("Method "+clsname+" has duplicate method_id '"+method_class.method_id+"'. method_id must be unique across all available methods.")<block_end>MethodFactory.method_classes.append(method_class)<block_end><return>method_class<block_end><block_end><class_stmt>Method(metaclass=MethodFactory)<block_start>"""Abstract base class providing the interface for a method """<line_sep># list of evidence types that all methods should ignore ignore_evidence_types=[]<line_sep>@abstractmethod<def_stmt>supports_scanning_file self language<block_start>"""Indicates whether this method supports scanning a file in the given language Args: language: language of the content (see langauges.py) Returns: (bool) whether it supports scanning a file in the given language """<line_sep><pass><block_end>@abstractmethod<def_stmt>search self content language<block_start>"""Search and find all matches in the content Args: content: the content to be scanned. Its type is string for text files and raw byte array for binary files. language: language of the content (see langauges.py) Returns: (list) list of matches. A match is a dict object containing all the output fields. """<line_sep><pass><block_end>@abstractmethod<def_stmt>quick_search self content language<block_start>"""Quick search the content in the given language Args: content: the content to be scanned. Its type is string for text files and raw byte array for binary files. language: language of the content (see langauges.py) Returns: (bool) whether it found any matches in the content """<line_sep><pass><block_end><block_end>
from .beginnertriathlete import *
import torch
from copy import deepcopy


class Mean:
    """
    Running average of the values that are 'add'ed
    """

    def __init__(self, update_weight=1):
        """
        :param update_weight: 1 for normal, 2 for t-average
        """
        self.average = None
        self.counter = 0
        self.update_weight = update_weight

    def add(self, value, weight=1):
        """Add a value to the accumulator"""
        self.counter += weight
        if self.average is None:
            self.average = deepcopy(value)
        else:
            delta = value - self.average
            self.average += delta * self.update_weight * weight / (
                self.counter + self.update_weight - 1)
            if isinstance(self.average, torch.Tensor):
                self.average.detach()

    def value(self):
        """Access the current running average"""
        return self.average


class Max:
    """
    Keeps track of the max of all the values that are 'add'ed
    """

    def __init__(self):
        self.max = None

    def add(self, value):
        """
        Add a value to the accumulator.
        :return: `True` if the provided value became the new max
        """
        if self.max is None or value > self.max:
            self.max = deepcopy(value)
            return True
        else:
            return False

    def value(self):
        """Access the current max"""
        return self.max
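# Usage sketch for the accumulators above; with update_weight=1 the running
# average equals the plain mean of the added values.
m = Mean()
for v in [1.0, 2.0, 3.0]:
    m.add(v)
assert m.value() == 2.0

mx = Max()
assert mx.add(5) is True   # 5 becomes the new max
assert mx.add(3) is False  # 3 does not
assert mx.value() == 5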
"""List current photoshop all documents."""<line_sep># Import local modules <import_stmt>photoshop.api<as>ps<line_sep>app=ps.Application()<line_sep>doc=app.documents[0]<line_sep>print(doc.name)<for_stmt>doc app.documents<block_start>print(doc.name)<block_end>
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from ._mesh_secret_operations import MeshSecretOperations
from ._mesh_secret_value_operations import MeshSecretValueOperations
from ._mesh_volume_operations import MeshVolumeOperations
from ._mesh_network_operations import MeshNetworkOperations
from ._mesh_application_operations import MeshApplicationOperations
from ._mesh_service_operations import MeshServiceOperations
from ._mesh_code_package_operations import MeshCodePackageOperations
from ._mesh_service_replica_operations import MeshServiceReplicaOperations
from ._mesh_gateway_operations import MeshGatewayOperations
from ._service_fabric_client_ap_is_operations import ServiceFabricClientAPIsOperationsMixin

__all__ = [
    'MeshSecretOperations',
    'MeshSecretValueOperations',
    'MeshVolumeOperations',
    'MeshNetworkOperations',
    'MeshApplicationOperations',
    'MeshServiceOperations',
    'MeshCodePackageOperations',
    'MeshServiceReplicaOperations',
    'MeshGatewayOperations',
    'ServiceFabricClientAPIsOperationsMixin',
]
from .exceptions import HTTPException, OktaAPIException  # noqa
""" tmtoolkit – Text Mining and Topic Modeling Toolkit for Python CLI module <NAME> <<EMAIL>> """<if_stmt>__name__<eq>'__main__'<block_start><import_stmt>sys<import_stmt>subprocess<import_stmt>json<import_from_stmt>tmtoolkit.preprocess DEFAULT_LANGUAGE_MODELS<def_stmt>_setup args<block_start><try_stmt><block_start><import_stmt>spacy<import_from_stmt>spacy.cli.download download<block_end><except_stmt>ImportError<block_start>print('error: required package "spacy" is not installed' file=sys.stderr)<line_sep>exit(1)<block_end><if_stmt><not>args<block_start>print('error: you must pass a list of two-letter ISO 639-1 language codes to install the respective '<concat>'language models or the string "all" to install all available language models' file=sys.stderr)<line_sep>exit(2)<block_end><else_stmt><block_start><try_stmt><block_start>args.pop(args.index('--no-update'))<line_sep>no_update=<true><block_end><except_stmt>ValueError<block_start>no_update=<false><block_end><if_stmt>args<eq>['all']<block_start>install_languages=list(DEFAULT_LANGUAGE_MODELS.keys())<block_end><else_stmt><block_start>install_languages=[]<for_stmt>arg args<block_start>install_languages.extend([l<for>l map(str.strip arg.split(','))<if>l])<block_end><block_end><block_end>print('checking if required spaCy data packages are installed...')<try_stmt><block_start>piplist_str=subprocess.check_output([sys.executable '-m' 'pip' 'list' '--disable-pip-version-check' '--format' 'json'])<block_end><except_stmt>subprocess.CalledProcessError<as>exc<block_start>print('error: calling pip failed with the following error message\n'+str(exc) file=sys.stderr)<line_sep>exit(3)<block_end>piplist=json.loads(piplist_str)<line_sep>installed_pkgs=set(item['name']<for>item piplist)<line_sep>model_pkgs=dict(zip(DEFAULT_LANGUAGE_MODELS.keys() map(<lambda>x:x.replace('_' '-')+'-sm' DEFAULT_LANGUAGE_MODELS.values())))<for_stmt>lang install_languages<block_start><if_stmt>lang<not><in>DEFAULT_LANGUAGE_MODELS.keys()<block_start>print('error: no language model for language code "%s"'%lang file=sys.stderr)<line_sep>exit(4)<block_end>lang_model_pkg=model_pkgs[lang]<if_stmt>no_update<and>lang_model_pkg<in>installed_pkgs<block_start>print('language model package "%s" for language code "%s" is already installed -- skipping'%(lang_model_pkg lang))<line_sep><continue><block_end>lang_model=DEFAULT_LANGUAGE_MODELS[lang]+'_sm'<line_sep>print('installing language model "%s" for language code "%s"...'%(lang_model lang))<line_sep>download(lang_model)<block_end>print('done.')<block_end>commands={'setup':_setup}<if_stmt>len(sys.argv)<le>1<block_start>print('available commands: '+', '.join(commands.keys()))<line_sep>exit(1)<block_end>cmd=sys.argv[1]<if_stmt>cmd<in>commands.keys()<block_start>commands[cmd](sys.argv[2:])<block_end><else_stmt><block_start>print('command not supported:' cmd file=sys.stderr)<line_sep>exit(2)<block_end><block_end>
# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google.colab.data_table."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unittest

import IPython
import pandas as pd

from google.colab import data_table

# pylint:disable=g-import-not-at-top
try:
    from unittest import mock  # pylint:disable=g-importing-member
except ImportError:
    import mock
# pylint:enable=g-import-not-at-top


class DataTableTest(unittest.TestCase):

    def setUp(self):
        super(DataTableTest, self).setUp()
        self.ip_patcher = mock.patch.object(IPython, 'get_ipython', autospec=True)
        get_ipython = self.ip_patcher.start()
        get_ipython.return_value = IPython.InteractiveShell()

    def tearDown(self):
        self.ip_patcher.stop()
        super(DataTableTest, self).tearDown()

    def testDataTable(self):
        df = pd.DataFrame({
            'x': [12345, 23456, 34567],
            'y': ['abcde', 'bcdef', 'cdefg'],
        })
        dt = data_table.DataTable(df)
        html = dt._repr_html_()
        for col in df.columns:
            for val in df[col]:
                self.assertIn('{}'.format(val), html)

    def testFormatterEnableDisable(self):

        def get_formatter():
            key = data_table._JAVASCRIPT_MODULE_MIME_TYPE
            formatters = IPython.get_ipython().display_formatter.formatters
            if key in formatters:
                return formatters[key].for_type_by_name('pandas.core.frame',
                                                        'DataFrame')
            else:
                return None

        # default formatter is None.
        self.assertIsNone(get_formatter())

        # enabling changes the formatter.
        data_table.enable_dataframe_formatter()
        # classmethod identity is not preserved; compare reprs:
        self.assertEqual(repr(get_formatter()), repr(data_table.DataTable.formatter))

        # disabling restores the default.
        data_table.disable_dataframe_formatter()
        self.assertIsNone(get_formatter())
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

_credential_properties = {
    'blob': {'type': 'string'},
    'project_id': {'type': 'string'},
    'type': {'type': 'string'},
    'user_id': {'type': 'string'},
}

credential_create = {
    'type': 'object',
    'properties': _credential_properties,
    'additionalProperties': True,
    'oneOf': [
        {
            'title': 'ec2 credential requires project_id',
            'required': ['blob', 'type', 'user_id', 'project_id'],
            'properties': {'type': {'enum': ['ec2']}},
        },
        {
            'title': 'non-ec2 credential does not require project_id',
            'required': ['blob', 'type', 'user_id'],
            'properties': {'type': {'not': {'enum': ['ec2']}}},
        },
    ],
}

credential_update = {
    'type': 'object',
    'properties': _credential_properties,
    'minProperties': 1,
    'additionalProperties': True,
}
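# Illustrative check only: Keystone feeds these dicts to its own validator
# wrapper, but the plain jsonschema package (assumed here) shows the oneOf
# behavior directly; the payload values are hypothetical.
import jsonschema

_ec2_payload = {
    'blob': '{"access": "...", "secret": "..."}',
    'type': 'ec2',
    'user_id': 'u1',
    'project_id': 'p1',
}
# Passes: the ec2 branch of oneOf requires project_id and it is present;
# dropping project_id would raise jsonschema.ValidationError.
jsonschema.validate(_ec2_payload, credential_create)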
from bpy.props import (StringProperty, BoolProperty, EnumProperty, IntProperty)
from ...rman_ui.rman_ui_base import CollectionPanel
from ...rfb_logger import rfb_log
from ...rman_operators.rman_operators_collections import return_empty_list
from ...rman_config import __RFB_CONFIG_DICT__ as rfb_config
from ...rfb_utils import object_utils
from ...rfb_utils import scene_utils
import bpy
import re


class RENDERMAN_UL_Volume_Aggregates_List(bpy.types.UIList):

    def draw_item(self, context, layout, data, item, icon, active_data,
                  active_propname, index):
        custom_icon = 'OBJECT_DATAMODE'
        layout.context_pointer_set("selected_obj", item.ob_pointer)
        op = layout.operator('renderman.remove_from_vol_aggregate', text='',
                             icon='REMOVE')
        label = item.ob_pointer.name
        layout.label(text=label, icon=custom_icon)


class PRMAN_OT_Renderman_Open_Volume_Aggregates_Editor(CollectionPanel, bpy.types.Operator):
    bl_idname = "scene.rman_open_vol_aggregates_editor"
    bl_label = "RenderMan Volume Aggregates Editor"
    bl_description = "Volume Aggregates Editor"

    def updated_object_selected_name(self, context):
        ob = context.scene.objects.get(self.selected_obj_name, None)
        if not ob:
            return

        if context.view_layer.objects.active:
            context.view_layer.objects.active.select_set(False)

        ob.select_set(True)
        context.view_layer.objects.active = ob

    def obj_list_items(self, context):
        pattern = re.compile(self.object_search_filter)
        scene = context.scene
        rm = scene.renderman

        if self.do_object_filter and self.object_search_filter == '':
            return return_empty_list(label='No Objects Found')

        group = rm.vol_aggregates[rm.vol_aggregates_index]
        objs_in_group = []
        for member in group.members:
            objs_in_group.append(member.ob_pointer.name)

        items = []
        for ob in scene_utils.get_all_volume_objects(scene):
            ob_name = ob.name
            if ob_name not in objs_in_group:
                if self.do_object_filter and not re.match(pattern, ob_name):
                    continue
                items.append((ob_name, ob_name, ''))
        if not items:
            return return_empty_list(label='No Objects Found')
        elif self.do_object_filter:
            items.insert(0, ('0', 'Results (%d)' % len(items), '', '', 0))
        else:
            items.insert(0, ('0', 'Select Object', '', '', 0))
        return items

    def update_do_object_filter(self, context):
        self.selected_obj_name = '0'

    do_object_filter: BoolProperty(
        name="Object Filter",
        description="Search and add multiple objects",
        default=False,
        update=update_do_object_filter)

    object_search_filter: StringProperty(
        name="Object Filter Search",
        default="")

    selected_obj_name: EnumProperty(
        name="",
        items=obj_list_items,
        update=updated_object_selected_name)

    def execute(self, context):
        return {'FINISHED'}

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        rm = scene.renderman
        layout.separator()
        self._draw_collection(context, layout, rm, "Volume Aggregates",
                              "renderman.add_remove_volume_aggregates",
                              "scene.renderman", "vol_aggregates",
                              "vol_aggregates_index",
                              default_name='VolumeAggregate_%d' % len(rm.vol_aggregates))

    def draw_objects_item(self, layout, context, item):
        row = layout.row()
        scene = context.scene
        rm = scene.renderman
        vol_aggregate = rm.vol_aggregates[rm.vol_aggregates_index]

        row = layout.row()
        row.separator()
        row.prop(self, 'do_object_filter', text='', icon='FILTER', icon_only=True)
        if not self.do_object_filter:
            row.prop(self, 'selected_obj_name', text='')
            col = row.column()
            if self.selected_obj_name == '0' or self.selected_obj_name == '':
                col.enabled = False
                op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
                op.open_editor = False
            else:
                col.context_pointer_set('op_ptr', self)
                col.context_pointer_set('selected_obj', scene.objects[self.selected_obj_name])
                op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
                op.vol_aggregates_index = rm.vol_aggregates_index
                op.do_scene_selected = False
                op.open_editor = False
        else:
            row.prop(self, 'object_search_filter', text='', icon='VIEWZOOM')
            row = layout.row()
            row.prop(self, 'selected_obj_name')
            col = row.column()
            if self.selected_obj_name == '0' or self.selected_obj_name == '':
                col.enabled = False
                op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
                op.open_editor = False
            else:
                col.context_pointer_set('op_ptr', self)
                col.context_pointer_set('selected_obj', scene.objects[self.selected_obj_name])
                op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
                op.vol_aggregates_index = rm.vol_aggregates_index
                op.do_scene_selected = False
                op.open_editor = False

        row = layout.row()
        row.template_list('RENDERMAN_UL_Volume_Aggregates_List', "",
                          vol_aggregate, "members", vol_aggregate,
                          'members_index', rows=6)

    def draw_item(self, layout, context, item):
        self.draw_objects_item(layout, context, item)

    def cancel(self, context):
        if self.event and self.event.type == 'LEFTMOUSE':
            bpy.ops.scene.rman_open_vol_aggregates_editor('INVOKE_DEFAULT')

    def __init__(self):
        self.event = None

    def invoke(self, context, event):
        wm = context.window_manager
        width = rfb_config['editor_preferences']['vol_aggregates_editor']['width']
        self.event = event
        return wm.invoke_props_dialog(self, width=width)


classes = [
    PRMAN_OT_Renderman_Open_Volume_Aggregates_Editor,
    RENDERMAN_UL_Volume_Aggregates_List,
]


def register():
    for cls in classes:
        bpy.utils.register_class(cls)


def unregister():
    for cls in classes:
        try:
            bpy.utils.unregister_class(cls)
        except RuntimeError:
            rfb_log().debug('Could not unregister class: %s' % str(cls))
            pass
""" Particular class of real traffic network @author: <NAME> """<import_stmt>configparser<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_stmt>os<import_stmt>seaborn<as>sns<import_stmt>time<import_from_stmt>envs.env PhaseMap PhaseSet TrafficSimulator<import_from_stmt>real_net.data.build_file gen_rou_file<line_sep>sns.set_color_codes()<line_sep>STATE_NAMES=['wave']<line_sep># node: (phase key, neighbor list) NODES={'10026':('6.0' ['9431' '9561' 'cluster_9563_9597' '9531']) '8794':('4.0' ['cluster_8985_9609' '9837' '9058' 'cluster_9563_9597']) '8940':('2.1' ['9007' '9429']) '8996':('2.2' ['cluster_9389_9689' '9713']) '9007':('2.3' ['9309' '8940']) '9058':('4.0' ['cluster_8985_9609' '8794' 'joinedS_0']) '9153':('2.0' ['9643']) '9309':('4.0' ['9466' '9007' 'cluster_9043_9052']) '9413':('2.3' ['9721' '9837']) '9429':('5.0' ['cluster_9043_9052' 'joinedS_1' '8940']) '9431':('2.4' ['9721' '9884' '9561' '10026']) '9433':('2.5' ['joinedS_1']) '9466':('4.0' ['9309' 'joinedS_0' 'cluster_9043_9052']) '9480':('2.3' ['8996' '9713']) '9531':('2.6' ['joinedS_1' '10026']) '9561':('4.0' ['cluster_9389_9689' '10026' '9431' '9884']) '9643':('2.3' ['9153']) '9713':('3.0' ['9721' '9884' '8996']) '9721':('6.0' ['9431' '9713' '9413']) '9837':('3.1' ['9413' '8794' 'cluster_8985_9609']) '9884':('2.7' ['9713' '9431' 'cluster_9389_9689' '9561']) 'cluster_8751_9630':('4.0' ['cluster_9389_9689']) 'cluster_8985_9609':('4.0' ['9837' '8794' '9058']) 'cluster_9043_9052':('4.1' ['cluster_9563_9597' '9466' '9309' '10026' 'joinedS_1']) 'cluster_9389_9689':('4.0' ['9884' '9561' 'cluster_8751_9630' '8996']) 'cluster_9563_9597':('4.2' ['10026' '8794' 'joinedS_0' 'cluster_9043_9052']) 'joinedS_0':('6.1' ['9058' 'cluster_9563_9597' '9466']) 'joinedS_1':('3.2' ['9531' '9429'])}<line_sep>PHASES={'4.0':['GGgrrrGGgrrr' 'rrrGGgrrrGGg' 'rrGrrrrrGrrr' 'rrrrrGrrrrrG'] '4.1':['GGgrrGGGrrr' 'rrGrrrrrrrr' 'rrrGgrrrGGg' 'rrrrGrrrrrG'] '4.2':['GGGGrrrrrrrr' 'GGggrrGGggrr' 'rrrGGGGrrrrr' 'grrGGggrrGGg'] '2.0':['GGrrr' 'ggGGG'] '2.1':['GGGrrr' 'rrGGGg'] '2.2':['Grr' 'gGG'] '2.3':['GGGgrr' 'GrrrGG'] '2.4':['GGGGrr' 'rrrrGG'] '2.5':['Gg' 'rG'] '2.6':['GGGg' 'rrrG'] '2.7':['GGg' 'rrG'] '3.0':['GGgrrrGGg' 'rrGrrrrrG' 'rrrGGGGrr'] '3.1':['GgrrGG' 'rGrrrr' 'rrGGGr'] '3.2':['GGGGrrrGG' 'rrrrGGGGr' 'GGGGrrGGr'] '5.0':['GGGGgrrrrGGGggrrrr' 'grrrGrrrrgrrGGrrrr' 'GGGGGrrrrrrrrrrrrr' 'rrrrrrrrrGGGGGrrrr' 'rrrrrGGggrrrrrggGg'] '6.0':['GGGgrrrGGGgrrr' 'rrrGrrrrrrGrrr' 'GGGGrrrrrrrrrr' 'rrrrrrrrrrGGGG' 'rrrrGGgrrrrGGg' 'rrrrrrGrrrrrrG'] '6.1':['GGgrrGGGrrrGGGgrrrGGGg' 'rrGrrrrrrrrrrrGrrrrrrG' 'GGGrrrrrGGgrrrrGGgrrrr' 'GGGrrrrrrrGrrrrrrGrrrr' 'rrrGGGrrrrrrrrrrrrGGGG' 'rrrGGGrrrrrGGGgrrrGGGg']}<class_stmt>RealNetPhase(PhaseMap)<block_start><def_stmt>__init__ self<block_start>self.phases={}<for_stmt>key,val PHASES.items()<block_start>self.phases[key]=PhaseSet(val)<block_end><block_end><block_end><class_stmt>RealNetController<block_start><def_stmt>__init__ self node_names nodes<block_start>self.name='greedy'<line_sep>self.node_names=node_names<line_sep>self.nodes=nodes<block_end><def_stmt>forward self obs<block_start>actions=[]<for_stmt>ob,node_name zip(obs self.node_names)<block_start>actions.append(self.greedy(ob node_name))<block_end><return>actions<block_end><def_stmt>greedy self ob node_name# get the action space <block_start>phases=PHASES[NODES[node_name][0]]<line_sep>flows=[]<line_sep>node=self.nodes[node_name]<line_sep># get the green waves <for_stmt>phase 
phases<block_start>wave=0<line_sep>visited_ilds=set()<for_stmt>i,signal enumerate(phase)<block_start><if_stmt>signal<eq>'G'# find controlled lane <block_start>lane=node.lanes_in[i]<line_sep># ild = 'ild:' + lane ild=lane<line_sep># if it has not been counted, add the wave <if_stmt>ild<not><in>visited_ilds<block_start>j=node.ilds_in.index(ild)<line_sep>wave<augadd>ob[j]<line_sep>visited_ilds.add(ild)<block_end><block_end><block_end>flows.append(wave)<block_end><return>np.argmax(np.array(flows))<block_end><block_end><class_stmt>RealNetEnv(TrafficSimulator)<block_start><def_stmt>__init__ self config port=0 output_path='' is_record=<false> record_stat=<false><block_start>self.flow_rate=config.getint('flow_rate')<line_sep>super().__init__(config output_path is_record record_stat port=port)<block_end><def_stmt>_get_node_phase_id self node_name<block_start><return>self.phase_node_map[node_name]<block_end><def_stmt>_init_neighbor_map self<block_start><return>dict([(key val[1])<for>key,val NODES.items()])<block_end><def_stmt>_init_map self<block_start>self.neighbor_map=self._init_neighbor_map()<line_sep>self.phase_map=RealNetPhase()<line_sep>self.phase_node_map=dict([(key val[0])<for>key,val NODES.items()])<line_sep>self.state_names=STATE_NAMES<block_end><def_stmt>_init_sim_config self seed# comment out to call build_file.py <block_start><return>gen_rou_file(self.data_path self.flow_rate seed=seed thread=self.sim_thread)<block_end><def_stmt>plot_stat self rewards<block_start>self.state_stat['reward']=rewards<for_stmt>name,data self.state_stat.items()<block_start>fig=plt.figure(figsize=(8 6))<line_sep>plot_cdf(data)<line_sep>plt.ylabel(name)<line_sep>fig.savefig(self.output_path+self.name+'_'+name+'.png')<block_end><block_end><block_end><def_stmt>plot_cdf X c='b' label=<none><block_start>sorted_data=np.sort(X)<line_sep>yvals=np.arange(len(sorted_data))/float(len(sorted_data)-1)<line_sep>plt.plot(sorted_data yvals color=c label=label)<block_end><if_stmt>__name__<eq>'__main__'<block_start>logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s' level=logging.INFO)<line_sep>config=configparser.ConfigParser()<line_sep>config.read('./config/config_test_real.ini')<line_sep>base_dir='./output_result/'<if_stmt><not>os.path.exists(base_dir)<block_start>os.mkdir(base_dir)<block_end>env=RealNetEnv(config['ENV_CONFIG'] 2 base_dir is_record=<true> record_stat=<true>)<line_sep>env.train_mode=<false><line_sep>time.sleep(1)<line_sep># ob = env.reset(gui=True) controller=RealNetController(env.node_names env.nodes)<line_sep>env.init_test_seeds(list(range(10000 100001 10000)))<line_sep>rewards=[]<for_stmt>i range(10)<block_start>ob=env.reset(test_ind=i)<line_sep>global_rewards=[]<line_sep>cur_step=0<while_stmt><true><block_start>next_ob,reward,done,global_reward=env.step(controller.forward(ob))<line_sep># for node_name, node_ob in zip(env.node_names, next_ob): # logging.info('%d, %s:%r\n' % (cur_step, node_name, node_ob)) global_rewards.append(global_reward)<line_sep>rewards<augadd>list(reward)<line_sep>cur_step<augadd>1<if_stmt>done<block_start><break><block_end>ob=next_ob<block_end>env.terminate()<line_sep>logging.info('step: %d, avg reward: %.2f'%(cur_step np.mean(global_rewards)))<line_sep>time.sleep(1)<block_end>env.plot_stat(np.array(rewards))<line_sep>env.terminate()<line_sep>time.sleep(2)<line_sep>env.collect_tripinfo()<line_sep>env.output_data()<block_end>
import operator

from .lazy import *
from .bits import bits


__all__ = ["dump_hex", "dump_bin", "dump_seq", "dump_mapseq"]


def dump_hex(data):
    def to_hex(data):
        try:
            data = memoryview(data)
        except TypeError:
            data = memoryview(bytes(data))
        if dump_hex.limit is None or len(data) < dump_hex.limit:
            return data.hex()
        else:
            return "{}... ({} bytes total)".format(
                data[:dump_hex.limit].hex(), len(data))
    return lazy(lambda: to_hex(data))

dump_hex.limit = 64


def dump_bin(data):
    def to_bin(data):
        data = bits(data)
        if dump_bin.limit is None or len(data) < dump_bin.limit:
            return str(data)[::-1]
        else:
            return "{}... ({} bits total)".format(
                str(data[:dump_bin.limit])[::-1], len(data))
    return lazy(lambda: to_bin(data))

dump_bin.limit = 64


def dump_seq(joiner, data):
    def to_seq(data):
        try:
            data_length = len(data)
        except TypeError:
            try:
                data_length = data.__length_hint__()
            except AttributeError:
                data_length = None
        if dump_seq.limit is None or (data_length is not None and
                                      data_length < dump_seq.limit):
            return joiner.join(data)
        else:
            return "{}... ({} elements total)".format(
                joiner.join(elem for elem, _ in zip(data, range(dump_seq.limit))),
                data_length or "?")
    return lazy(lambda: to_seq(data))

dump_seq.limit = 16


def dump_mapseq(joiner, mapper, data):
    def to_mapseq(data):
        try:
            data_length = len(data)
        except TypeError:
            try:
                data_length = data.__length_hint__()
            except AttributeError:
                data_length = None
        if dump_mapseq.limit is None or (data_length is not None and
                                         data_length < dump_mapseq.limit):
            return joiner.join(map(mapper, data))
        else:
            return "{}... ({} elements total)".format(
                joiner.join(mapper(elem) for elem, _ in zip(data, range(dump_mapseq.limit))),
                data_length or "?")
    return lazy(lambda: to_mapseq(data))

dump_mapseq.limit = 16
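# Usage sketch: dump_hex defers the hex conversion until the log record is
# actually formatted (assuming `lazy` from .lazy stringifies on demand), so
# disabled debug logging pays no conversion cost; the logger name is arbitrary.
import logging

_logger = logging.getLogger(__name__)
_packet = bytes(range(256))
_logger.debug("rx data=%s", dump_hex(_packet))  # truncated at dump_hex.limit bytes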
"""empty message Revision ID: 1fe582999fec Revises: <PASSWORD> Create Date: 2019-11-19 11:12:38.940614 """<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<line_sep># revision identifiers, used by Alembic. revision='1fe582999fec'<line_sep>down_revision='<PASSWORD>'<line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.drop_column('settings' 'conditional_posting_old')<line_sep># ### end Alembic commands ### <block_end><def_stmt>downgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.add_column('settings' sa.Column('conditional_posting_old' sa.BOOLEAN() nullable=<false>))<line_sep># ### end Alembic commands ### <block_end>
from django.db.backends.base.validation import BaseDatabaseValidation


class CassandraDatabaseValidation(BaseDatabaseValidation):
    pass
# this tests numpy array simplification using RDP
# 216804 --> 3061 points (98.5% reduction)
# 50ms per VW operation on MBA Core i7
# Note that for the equivalent VW output we should reduce tolerance by an
# order of magnitude, then divide by 2
# e.g. 0.01 -> 0.0005
from simplification.cutil import simplify_coords

import json
import numpy as np

with open("test/coords_complex.json", "r") as f:
    coords = np.array(json.load(f))

for x in range(50):
    simplify_coords(coords, 0.01)
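# Rough VW counterpart per the note above (0.01 -> 0.0005), assuming
# simplify_coords_vw, the package's Visvalingam-Whyatt entry point.
from simplification.cutil import simplify_coords_vw

simplified_vw = simplify_coords_vw(coords, 0.0005)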
from accessify import implements


class HumanSoulInterface:

    def love(self, who, *args, **kwargs):
        pass


class HumanBasicsInterface:

    @staticmethod
    def eat(food, *args, allergy=None, **kwargs):
        pass


if __name__ == '__main__':

    @implements(HumanSoulInterface, HumanBasicsInterface)
    class Human:

        def love(self, who, *args, **kwargs):
            pass
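# Sketch of what the snippet above demonstrates: @implements validates
# members at class-creation time, so a class that omits a declared member
# (like eat() here) raises an accessify interface-violation error; caught
# generically since the exact exception class is not shown in this snippet.
try:
    @implements(HumanBasicsInterface)
    class Snack:
        pass
except Exception as exc:
    print(type(exc).__name__, exc)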
""" This is a backport and modernization of Cython's coverage.py support from the 3.* branch to the stable 0.29.* branch. Most importantly for us, it includes support for coverage.py's `exclude_lines` configuration option, allowing us to filter things like MemoryError from the coverage reports. It is standalone, and does not require Cython itself. The complete license for this file can be found at: https://github.com/cython/cython/blob/0.29.22/LICENSE.txt Changelog --------- 1.0.0 ^^^^^ - Support for excluded_lines - Fixed inconsistent quotations, PEP8'd. """<import_stmt>io<import_stmt>re<import_stmt>os.path<import_stmt>sys<import_from_stmt>collections defaultdict<import_from_stmt>coverage.plugin CoveragePlugin FileTracer FileReporter<import_from_stmt>coverage.files canonical_filename<line_sep>C_FILE_EXTENSIONS={'.c' '.cpp' '.cc' '.cxx'}<line_sep>MODULE_FILE_EXTENSIONS={'.py' '.pyx' '.pxd'}|C_FILE_EXTENSIONS<def_stmt>is_package_dir dir_path<block_start><for_stmt>filename ('__init__.py' '__init__.pyc' '__init__.pyx' '__init__.pxd')<block_start>path=os.path.join(dir_path filename)<if_stmt>path_exists(path)<block_start><return>1<block_end><block_end><block_end>_match_file_encoding=re.compile(br'(\w*coding)[:=]\s*([-\w.]+)').search<def_stmt>detect_opened_file_encoding f# PEPs 263 and 3120 # Most of the time the first two lines fall in the first couple of hundred # chars, and this bulk read/split is much faster. <block_start>lines=()<line_sep>start=b''<while_stmt>len(lines)<l>3<block_start>data=f.read(500)<line_sep>start<augadd>data<line_sep>lines=start.split(b'\n')<if_stmt><not>data<block_start><break><block_end><block_end>m=_match_file_encoding(lines[0])<if_stmt>m<and>m.group(1)<ne>b'c_string_encoding'<block_start><return>m.group(2).decode('iso8859-1')<block_end><elif_stmt>len(lines)<g>1<block_start>m=_match_file_encoding(lines[1])<if_stmt>m<block_start><return>m.group(2).decode('iso8859-1')<block_end><block_end><return>'UTF-8'<block_end><def_stmt>path_exists path# try on the filesystem first <block_start><if_stmt>os.path.exists(path)<block_start><return><true><block_end># figure out if a PEP 302 loader is around <try_stmt><block_start>loader=__loader__<line_sep># XXX the code below assumes a 'zipimport.zipimporter' instance # XXX should be easy to generalize, but too lazy right now to write it archive_path=getattr(loader 'archive' <none>)<if_stmt>archive_path<block_start>normpath=os.path.normpath(path)<if_stmt>normpath.startswith(archive_path)<block_start>arcname=normpath[len(archive_path)+1:]<try_stmt><block_start>loader.get_data(arcname)<line_sep><return><true><block_end><except_stmt>IOError<block_start><return><false><block_end><block_end><block_end><block_end><except_stmt>NameError<block_start><pass><block_end><return><false><block_end><def_stmt>find_root_package_dir file_path<block_start>dir=os.path.dirname(file_path)<if_stmt>file_path<eq>dir<block_start><return>dir<block_end><elif_stmt>is_package_dir(dir)<block_start><return>find_root_package_dir(dir)<block_end><else_stmt><block_start><return>dir<block_end><block_end><def_stmt>open_source_file source_filename encoding=<none> error_handling=<none><block_start>stream=<none><try_stmt><block_start><if_stmt>encoding<is><none># Most of the time the encoding is not specified, so try hard to # open the file only once. 
<block_start>f=io.open(source_filename 'rb')<line_sep>encoding=detect_opened_file_encoding(f)<line_sep>f.seek(0)<line_sep>stream=io.TextIOWrapper(f encoding=encoding errors=error_handling)<block_end><else_stmt><block_start>stream=io.open(source_filename encoding=encoding errors=error_handling)<block_end><block_end><except_stmt>OSError<block_start><if_stmt>os.path.exists(source_filename)<block_start><raise># File is there, but something went wrong reading from it. <block_end># Allow source files to be in zip files etc. <try_stmt><block_start>loader=__loader__<if_stmt>source_filename.startswith(loader.archive)<block_start>stream=open_source_from_loader(loader source_filename encoding error_handling)<block_end><block_end><except_stmt>(NameError AttributeError)<block_start><pass><block_end><block_end><if_stmt>stream<is><none><block_start><raise>FileNotFoundError(source_filename)<block_end><if_stmt>stream.read(1)<ne>u'\uFEFF'<block_start>stream.seek(0)<block_end><return>stream<block_end><def_stmt>_find_c_source base_path<block_start>file_exists=os.path.exists<for_stmt>ext C_FILE_EXTENSIONS<block_start>file_name=base_path+ext<if_stmt>file_exists(file_name)<block_start><return>file_name<block_end><block_end><return><none><block_end><def_stmt>_find_dep_file_path main_file file_path relative_path_search=<false><block_start>abs_path=os.path.abspath(file_path)<if_stmt><not>os.path.exists(abs_path)<and>(file_path.endswith('.pxi')<or>relative_path_search)# files are looked up relative to the main source file <block_start>rel_file_path=os.path.join(os.path.dirname(main_file) file_path)<if_stmt>os.path.exists(rel_file_path)<block_start>abs_path=os.path.abspath(rel_file_path)<block_end><block_end># search sys.path for external locations if a valid file hasn't been found <if_stmt><not>os.path.exists(abs_path)<block_start><for_stmt>sys_path sys.path<block_start>test_path=os.path.realpath(os.path.join(sys_path file_path))<if_stmt>os.path.exists(test_path)<block_start><return>canonical_filename(test_path)<block_end><block_end><block_end><return>canonical_filename(abs_path)<block_end><class_stmt>Plugin(CoveragePlugin)# map from traced file paths to absolute file paths <block_start>_file_path_map=<none><line_sep># map from traced file paths to corresponding C files _c_files_map=<none><line_sep># map from parsed C files to their content _parsed_c_files=<none><line_sep># map from traced files to lines that are excluded from coverage _excluded_lines_map=<none><line_sep># list of regex patterns for lines to exclude _excluded_line_patterns=()<def_stmt>sys_info self<block_start><return>[]<block_end><def_stmt>configure self config# Entry point for coverage "configurer". # Read the regular expressions from the coverage config that match # lines to be excluded from coverage. <block_start>self._excluded_line_patterns=config.get_option('report:exclude_lines')<block_end><def_stmt>file_tracer self filename<block_start>""" Try to find a C source file for a file path found by the tracer. 
"""<if_stmt>filename.startswith('<')<or>filename.startswith('memory:')<block_start><return><none><block_end>c_file=py_file=<none><line_sep>filename=canonical_filename(os.path.abspath(filename))<if_stmt>self._c_files_map<and>filename<in>self._c_files_map<block_start>c_file=self._c_files_map[filename][0]<block_end><if_stmt>c_file<is><none><block_start>c_file,py_file=self._find_source_files(filename)<if_stmt><not>c_file<block_start><return><none><block_end># unknown file # parse all source file paths and lines from C file # to learn about all relevant source files right away (pyx/pxi/pxd) # FIXME: this might already be too late if the first executed line # is not from the main .pyx file but a file with a different # name than the .c file (which prevents us from finding the # .c file) _,code=self._read_source_lines(c_file filename)<if_stmt>code<is><none><block_start><return><none><block_end><block_end># no source found <if_stmt>self._file_path_map<is><none><block_start>self._file_path_map={}<block_end><return>CythonModuleTracer(filename py_file c_file self._c_files_map self._file_path_map)<block_end><def_stmt>file_reporter self filename# TODO: let coverage.py handle .py files itself # ext = os.path.splitext(filename)[1].lower() # if ext == '.py': # from coverage.python import PythonFileReporter # return PythonFileReporter(filename) <block_start>filename=canonical_filename(os.path.abspath(filename))<if_stmt>self._c_files_map<and>filename<in>self._c_files_map<block_start>c_file,rel_file_path,code=self._c_files_map[filename]<block_end><else_stmt><block_start>c_file,_=self._find_source_files(filename)<if_stmt><not>c_file<block_start><return><none><block_end># unknown file rel_file_path,code=self._read_source_lines(c_file filename)<if_stmt>code<is><none><block_start><return><none><block_end><block_end># no source found <return>CythonModuleReporter(c_file filename rel_file_path code self._excluded_lines_map.get(rel_file_path frozenset()))<block_end><def_stmt>_find_source_files self filename<block_start>basename,ext=os.path.splitext(filename)<line_sep>ext=ext.lower()<if_stmt>ext<in>MODULE_FILE_EXTENSIONS<block_start><pass><block_end><elif_stmt>ext<eq>'.pyd'# Windows extension module <block_start>platform_suffix=re.search(r'[.]cp[0-9]+-win[_a-z0-9]*$' basename re.I)<if_stmt>platform_suffix<block_start>basename=basename[:platform_suffix.start()]<block_end><block_end><elif_stmt>ext<eq>'.so'# Linux/Unix/Mac extension module <block_start>platform_suffix=re.search(r'[.](?:cpython|pypy)-[0-9]+[-_a-z0-9]*$' basename re.I)<if_stmt>platform_suffix<block_start>basename=basename[:platform_suffix.start()]<block_end><block_end><elif_stmt>ext<eq>'.pxi'# if we get here, it means that the first traced line of a Cython # module was not in the main module but in an include file, so try # a little harder to find the main source file <block_start>self._find_c_source_files(os.path.dirname(filename) filename)<if_stmt>filename<in>self._c_files_map<block_start><return>self._c_files_map[filename][0] <none><block_end><block_end><else_stmt># none of our business <block_start><return><none> <none><block_end><if_stmt>ext<in>C_FILE_EXTENSIONS<block_start>c_file=filename<block_end><else_stmt><block_start>c_file=_find_c_source(basename)<block_end><if_stmt>c_file<is><none># a module "pkg/mod.so" can have a source file "pkg/pkg.mod.c" <block_start>package_root=find_root_package_dir(filename)<line_sep>package_path=os.path.relpath(basename 
package_root).split(os.path.sep)<if_stmt>len(package_path)<g>1<block_start>test_basepath=os.path.join(os.path.dirname(filename) '.'.join(package_path))<line_sep>c_file=_find_c_source(test_basepath)<block_end><block_end>py_source_file=<none><if_stmt>c_file<block_start>py_source_file=os.path.splitext(c_file)[0]+'.py'<if_stmt><not>os.path.exists(py_source_file)<block_start>py_source_file=<none><block_end><try_stmt><block_start><with_stmt>open(c_file 'rb')<as>f<block_start><if_stmt>b'/* Generated by Cython '<not><in>f.read(30)<block_start><return><none> <none># not a Cython file <block_end><block_end><block_end><except_stmt>(IOError OSError)<block_start>c_file=<none><block_end><block_end><return>c_file py_source_file<block_end><def_stmt>_find_c_source_files self dir_path source_file<block_start>""" Desperately parse all C files in the directory or its package parents (not re-descending) to find the (included) source file in one of them. """<if_stmt><not>os.path.isdir(dir_path)<block_start><return><block_end>splitext=os.path.splitext<for_stmt>filename os.listdir(dir_path)<block_start>ext=splitext(filename)[1].lower()<if_stmt>ext<in>C_FILE_EXTENSIONS<block_start>self._read_source_lines(os.path.join(dir_path filename) source_file)<if_stmt>source_file<in>self._c_files_map<block_start><return><block_end><block_end><block_end># not found? then try one package up <if_stmt>is_package_dir(dir_path)<block_start>self._find_c_source_files(os.path.dirname(dir_path) source_file)<block_end><block_end><def_stmt>_read_source_lines self c_file sourcefile<block_start>""" Parse a Cython generated C/C++ source file and find the executable lines. Each executable line starts with a comment header that states source file and line number, as well as the surrounding range of source code lines. """<if_stmt>self._parsed_c_files<is><none><block_start>self._parsed_c_files={}<block_end><if_stmt>c_file<in>self._parsed_c_files<block_start>code_lines=self._parsed_c_files[c_file]<block_end><else_stmt><block_start>code_lines=self._parse_cfile_lines(c_file)<line_sep>self._parsed_c_files[c_file]=code_lines<block_end><if_stmt>self._c_files_map<is><none><block_start>self._c_files_map={}<block_end><for_stmt>filename,code code_lines.items()<block_start>abs_path=_find_dep_file_path(c_file filename relative_path_search=<true>)<line_sep>self._c_files_map[abs_path]=(c_file filename code)<block_end><if_stmt>sourcefile<not><in>self._c_files_map<block_start><return>(<none> )<times>2# e.g. shared library file <block_end><return>self._c_files_map[sourcefile][1:]<block_end><def_stmt>_parse_cfile_lines self c_file<block_start>""" Parse a C file and extract all source file lines that generated executable code. 
"""<line_sep>match_source_path_line=re.compile(r' */[*] +"(.*)":([0-9]+)$').match<line_sep>match_current_code_line=re.compile(r' *[*] (.*) # <<<<<<+$').match<line_sep>match_comment_end=re.compile(r' *[*]/$').match<line_sep>match_trace_line=re.compile(r' *__Pyx_TraceLine\(([0-9]+),').match<line_sep>not_executable=re.compile(r'\s*c(?:type)?def\s+'<concat>r'(?:(?:public|external)\s+)?'<concat>r'(?:struct|union|enum|class)'<concat>r'(\s+[^:]+|)\s*:').match<line_sep>line_is_excluded=<none><if_stmt>self._excluded_line_patterns<block_start>line_is_excluded=re.compile('|'.join(['(?:{0})'.format(regex)<for>regex self._excluded_line_patterns])).search<block_end>code_lines=defaultdict(dict)<line_sep>executable_lines=defaultdict(set)<line_sep>current_filename=<none><if_stmt>self._excluded_lines_map<is><none><block_start>self._excluded_lines_map=defaultdict(set)<block_end><with_stmt>open(c_file)<as>lines<block_start>lines=iter(lines)<for_stmt>line lines<block_start>match=match_source_path_line(line)<if_stmt><not>match<block_start><if_stmt>('__Pyx_TraceLine('<in>line<and>current_filename<is><not><none>)<block_start>trace_line=match_trace_line(line)<if_stmt>trace_line<block_start>executable_lines[current_filename].add(int(trace_line.group(1)))<block_end><block_end><continue><block_end>filename,lineno=match.groups()<line_sep>current_filename=filename<line_sep>lineno=int(lineno)<for_stmt>comment_line lines<block_start>match=match_current_code_line(comment_line)<if_stmt>match<block_start>code_line=match.group(1).rstrip()<if_stmt>not_executable(code_line)<block_start><break><block_end><if_stmt>(line_is_excluded<is><not><none><and>line_is_excluded(code_line))<block_start>self._excluded_lines_map[filename].add(lineno)<line_sep><break><block_end>code_lines[filename][lineno]=code_line<line_sep><break><block_end><elif_stmt>match_comment_end(comment_line)# unexpected comment format - false positive? <block_start><break><block_end><block_end><block_end><block_end># Remove lines that generated code but are not traceable. <for_stmt>filename,lines code_lines.items()<block_start>dead_lines=set(lines).difference(executable_lines.get(filename ()))<for_stmt>lineno dead_lines<block_start><del_stmt>lines[lineno]<block_end><block_end><return>code_lines<block_end><block_end><class_stmt>CythonModuleTracer(FileTracer)<block_start>""" Find the Python/Cython source file for a Cython module. """<def_stmt>__init__ self module_file py_file c_file c_files_map file_path_map<block_start>super(CythonModuleTracer self).__init__()<line_sep>self.module_file=module_file<line_sep>self.py_file=py_file<line_sep>self.c_file=c_file<line_sep>self._c_files_map=c_files_map<line_sep>self._file_path_map=file_path_map<block_end><def_stmt>has_dynamic_source_filename self<block_start><return><true><block_end><def_stmt>dynamic_source_filename self filename frame<block_start>""" Determine source file path. Called by the function call tracer. 
"""<line_sep>source_file=frame.f_code.co_filename<try_stmt><block_start><return>self._file_path_map[source_file]<block_end><except_stmt>KeyError<block_start><pass><block_end>abs_path=_find_dep_file_path(filename source_file)<if_stmt>self.py_file<and>source_file[-3:].lower()<eq>'.py'# always let coverage.py handle this case itself <block_start>self._file_path_map[source_file]=self.py_file<line_sep><return>self.py_file<block_end><assert_stmt>self._c_files_map<is><not><none><if_stmt>abs_path<not><in>self._c_files_map<block_start>self._c_files_map[abs_path]=(self.c_file source_file <none>)<block_end>self._file_path_map[source_file]=abs_path<line_sep><return>abs_path<block_end><block_end><class_stmt>CythonModuleReporter(FileReporter)<block_start>""" Provide detailed trace information for one source file to coverage.py. """<def_stmt>__init__ self c_file source_file rel_file_path code excluded_lines<block_start>super(CythonModuleReporter self).__init__(source_file)<line_sep>self.name=rel_file_path<line_sep>self.c_file=c_file<line_sep>self._code=code<line_sep>self._excluded_lines=excluded_lines<block_end><def_stmt>lines self<block_start>""" Return set of line numbers that are possibly executable. """<line_sep><return>set(self._code)<block_end><def_stmt>excluded_lines self<block_start>""" Return set of line numbers that are excluded from coverage. """<line_sep><return>self._excluded_lines<block_end><def_stmt>_iter_source_tokens self<block_start>current_line=1<for_stmt>line_no,code_line sorted(self._code.items())<block_start><while_stmt>line_no<g>current_line<block_start><yield>[]<line_sep>current_line<augadd>1<block_end><yield>[('txt' code_line)]<line_sep>current_line<augadd>1<block_end><block_end><def_stmt>source self<block_start>""" Return the source code of the file as a string. """<if_stmt>os.path.exists(self.filename)<block_start><with_stmt>open_source_file(self.filename)<as>f<block_start><return>f.read()<block_end><block_end><else_stmt><block_start><return>'\n'.join((tokens[0][1]<if>tokens<else>'')<for>tokens self._iter_source_tokens())<block_end><block_end><def_stmt>source_token_lines self<block_start>""" Iterate over the source code tokens. """<if_stmt>os.path.exists(self.filename)<block_start><with_stmt>open_source_file(self.filename)<as>f<block_start><for_stmt>line f<block_start><yield>[('txt' line.rstrip('\n'))]<block_end><block_end><block_end><else_stmt><block_start><for_stmt>line self._iter_source_tokens()<block_start><yield>[('txt' line)]<block_end><block_end><block_end><block_end><def_stmt>coverage_init reg options<block_start>plugin=Plugin()<line_sep>reg.add_configurer(plugin)<line_sep>reg.add_file_tracer(plugin)<block_end>
<import_stmt>json<import_from_stmt>pathlib Path<import_from_stmt>tempfile NamedTemporaryFile<import_from_stmt>textwrap dedent<import_stmt>pytest<import_from_stmt>attack_flow.schema anchor generate_html get_properties html_name insert_html InvalidRelationshipsError SchemaProperty validate_docs validate_rules <line_sep>PROJECT_ROOT=Path(__file__).resolve().parent.parent<line_sep>SCHEMA_PATH=PROJECT_ROOT/"schema"/"attack-flow-2022-01-05-draft.json"<def_stmt>test_validate_docs <block_start>doc1_json={"flow":{"type":"attack-flow" "id":"https://flow-v1/doc1" "name":"Test Attack Flow" "created":"2021-12-17T08:31:22.320133-05:00"} "actions":[] "assets":[] "relationships":[] "object_properties":[] "data_properties":[] }<line_sep>doc2_json={# Missing required name field: "flow":{"type":"attack-flow" "id":"https://flow-v1/doc1" "created":"bogus date" } "actions":[] "assets":[] "relationships":[] "object_properties":[] "data_properties":[] }<with_stmt>SCHEMA_PATH.open()<as>schema_file NamedTemporaryFile('w+')<as>doc1_file NamedTemporaryFile('w+')<as>doc2_file<block_start>json.dump(doc1_json doc1_file)<line_sep>json.dump(doc2_json doc2_file)<line_sep>schema_file.seek(0)<line_sep>doc1_file.seek(0)<line_sep>doc2_file.seek(0)<line_sep>results_one_file=validate_docs(schema_file.name doc1_file.name)<line_sep>results_two_files=validate_docs(schema_file.name [doc1_file.name doc2_file.name])<block_end><assert_stmt>results_one_file[0]<is><none><assert_stmt>results_two_files[0]<is><none><assert_stmt>isinstance(results_two_files[1] Exception)<block_end><def_stmt>test_schema_property_string <block_start>sp=SchemaProperty('test-prop' <false> {'description':'My description :>' 'type':'string' })<assert_stmt>sp.name<eq>'test-prop'<assert_stmt>sp.type<eq>'string'<assert_stmt><not>sp.required<assert_stmt>sp.html_type<eq>'string'<assert_stmt>sp.html_description<eq>'My description :&gt;'<block_end><def_stmt>test_schema_property_uuid <block_start>sp=SchemaProperty('test-uuid' <true> {'description':'My description :>' 'type':'string' 'format':'uuid' })<assert_stmt>sp.name<eq>'test-uuid'<assert_stmt>sp.type<eq>'string'<assert_stmt>sp.required<assert_stmt>sp.html_type<eq>'uuid'<assert_stmt>sp.html_description<eq>'My description :&gt;'<block_end><def_stmt>test_schema_property_datetime <block_start>sp=SchemaProperty('test-datetime' <true> {'description':'My description' 'type':'string' 'format':'date-time' })<assert_stmt>sp.name<eq>'test-datetime'<assert_stmt>sp.type<eq>'string'<assert_stmt>sp.required<assert_stmt>sp.html_type<eq>'date-time'<assert_stmt>sp.html_description<eq>'My description (RFC-3339 format, e.g. 
YYYY-MM-DDThh:mm:ssZ)'<block_end><def_stmt>test_schema_property_array_of_string <block_start>sp=SchemaProperty('test-array' <true> {'description':'My description' 'type':'array' 'items':{'type':'string'}})<assert_stmt>sp.name<eq>'test-array'<assert_stmt>sp.type<eq>'array'<assert_stmt>sp.subtype<eq>'string'<assert_stmt>sp.required<assert_stmt>sp.html_type<eq>'array of string'<assert_stmt>sp.html_description<eq>'My description'<block_end><def_stmt>test_schema_property_array_of_object <block_start>sp=SchemaProperty('test-array2' <true> {'description':'My description' 'type':'array' 'items':{'type':'object'}})<assert_stmt>sp.name<eq>'test-array2'<assert_stmt>sp.type<eq>'array'<assert_stmt>sp.subtype<eq>'object'<assert_stmt>sp.required<assert_stmt>sp.html_type<eq>'array of <a href="#testarray2">test-array2</a>'<assert_stmt>sp.html_description<eq>'My description'<block_end><def_stmt>test_schema_property_object <block_start>sp=SchemaProperty('test-object' <true> {'description':'My description' 'type':'object' 'properties':{'foo':'string'}})<assert_stmt>sp.name<eq>'test-object'<assert_stmt>sp.type<eq>'object'<assert_stmt>sp.subtype<eq>''<assert_stmt>sp.required<assert_stmt>sp.html_type<eq>'<a href="#testobject">test-object</a> object'<assert_stmt>sp.html_description<eq>'My description'<block_end><def_stmt>test_schema_property_enum <block_start>sp=SchemaProperty('test-enum' <true> {'description':'My description' 'type':'string' 'enum':['foo' 'bar']})<assert_stmt>sp.name<eq>'test-enum'<assert_stmt>sp.type<eq>'string'<assert_stmt>sp.required<assert_stmt>sp.html_type<eq>'enum'<assert_stmt>sp.html_description<eq>'My description (Enum values: "foo", "bar")'<block_end><def_stmt>test_get_properties <block_start>schema={'type':'object' 'properties':{'name':{'description':'My name' 'type':'string'} 'hobbies':{'description':'My hobbies' 'type':'array' 'items':{'type':'string'}} 'cars':{'description':'My cars' 'type':'array' 'items':{'type':'object' 'properties':{'make':{'description':'The auto manufacturer' 'type':'string' } 'model':{'description':'The model name' 'type':'string' } }}} 'address':{'description':'My address' 'type':'object' 'properties':{'city':{'description':'My city' 'type':'string'} 'state':{'description':'My state' 'type':'string'}}}}}<line_sep>props=get_properties(schema node='root')<assert_stmt>'root'<in>props<line_sep>root=props['root']<assert_stmt>root['name'].type<eq>'string'<assert_stmt>'address'<in>props<line_sep>address=props['address']<assert_stmt>address['city'].type<eq>'string'<block_end><def_stmt>test_generate_html <block_start>actual_html=generate_html({'__root__':{'prop1':SchemaProperty('prop1' <false> {'description':'prop1 description' 'type':'string' }) 'prop2':SchemaProperty('prop2' <true> {'description':'prop2 description' 'type':'string' })} 'subtype':{'prop3':SchemaProperty('prop3' <true> {'description':'prop3 description' 'type':'string'})}})<line_sep>expected_html=['<h3 id="TopLevel">Top Level Fields</h3>' '<table>' ' <tr>' ' <th>Name</th>' ' <th>Type</th>' ' <th>Required</th>' ' <th>Description</th>' ' </tr>' ' <tr>' ' <td>prop1</td>' ' <td>string</td>' ' <td>No</td>' ' <td>prop1 description</td>' ' </tr>' ' <tr>' ' <td>prop2</td>' ' <td>string</td>' ' <td>Yes</td>' ' <td>prop2 description</td>' ' </tr>' '</table>' '' '<h3 id="subtype">Subtype Fields</h3>' '<table>' ' <tr>' ' <th>Name</th>' ' <th>Type</th>' ' <th>Required</th>' ' <th>Description</th>' ' </tr>' ' <tr>' ' <td>prop3</td>' ' <td>string</td>' ' <td>Yes</td>' ' <td>prop3 description</td>' ' </tr>' 
'</table>' '' ]<assert_stmt>actual_html<eq>expected_html<block_end><def_stmt>test_anchor <block_start><assert_stmt>anchor('? ASDF; 123 ')<eq>'ASDF123'<block_end><def_stmt>test_insert_html <block_start>old_doc=iter(['old text 1' 'old text 2' '<!--JSON_SCHEMA-->' 'old html 1' 'old html 2' '<!--/JSON_SCHEMA-->' 'old text 3' 'old text 4' ])<line_sep>html=['new html 1' 'new html 2' ]<line_sep>actual=iter(insert_html(old_doc html).splitlines())<assert_stmt>next(actual)<eq>'old text 1'<assert_stmt>next(actual)<eq>'old text 2'<assert_stmt>next(actual).startswith('<!--JSON_SCHEMA')<assert_stmt>next(actual)<eq>'new html 1'<assert_stmt>next(actual)<eq>'new html 2'<assert_stmt>next(actual)<eq>'<!--/JSON_SCHEMA-->'<assert_stmt>next(actual)<eq>'old text 3'<assert_stmt>next(actual)<eq>'old text 4'<block_end><def_stmt>test_insert_html_no_start_tag <block_start>old_doc=iter(['old text 1' 'old text 2' '<!--/JSON_SCHEMA-->' 'old text 3' 'old text 4' ])<with_stmt>pytest.raises(Exception)<block_start>insert_html(old_doc []).splitlines()<block_end><block_end><def_stmt>test_insert_html_no_end_tag <block_start>old_doc=iter(['old text 1' 'old text 2' '<!--JSON_SCHEMA-->' 'old text 3' 'old text 4' ])<with_stmt>pytest.raises(Exception)<block_start>insert_html(old_doc []).splitlines()<block_end><block_end><def_stmt>test_validate_rules <block_start>flow={"flow":{"type":"attack-flow" "id":"https://flow-v1" "name":"Test Attack Flow" "created":"2021-12-17T08:31:22.320133-05:00"} "actions":[{"id":"action1" "name":"action-one" } ] "assets":[{"id":"asset1"} ] "relationships":[{"source":"action1" "target":"asset1" } {"source":"asset1" "target":"action2" } {"source":"action2" "target":"asset2" } ] }<with_stmt>pytest.raises(InvalidRelationshipsError)<as>exc_info<block_start>validate_rules(flow)<block_end>exc=exc_info.value<assert_stmt>str(exc)<eq>dedent("""\ - Relationship target ID "action2" does not exist. - Relationship source ID "action2" does not exist. - Relationship target ID "asset2" does not exist.""")<block_end><def_stmt>test_html_name <block_start><assert_stmt>html_name("foo")<eq>"Foo"<assert_stmt>html_name("foo_bar")<eq>"Foo Bar"<block_end>
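# A minimal sketch (file paths and the '__root__' node name are assumptions
# for illustration) of how the helpers exercised above could be chained to
# refresh the generated field tables in a documentation page. The call
# signatures mirror the tests: get_properties(schema, node=...) -> dict,
# generate_html(props) -> list of lines, insert_html(old_doc, html) -> str;
# the target page must already contain the <!--JSON_SCHEMA--> markers.
def refresh_schema_docs(schema_path, doc_path):
    import json
    with open(schema_path) as f:
        props = get_properties(json.load(f), node='__root__')
    html = generate_html(props)
    with open(doc_path) as f:
        new_doc = insert_html(iter(f.read().splitlines()), html)
    with open(doc_path, 'w') as f:
        f.write(new_doc)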
"""Original NetworkX graph tests"""<import_stmt>networkx<import_stmt>networkx<as>nx<import_from_stmt>.historical_tests HistoricalTests<class_stmt>TestGraphHistorical(HistoricalTests)<block_start>@classmethod<def_stmt>setup_class cls<block_start>HistoricalTests.setup_class()<line_sep>cls.G=nx.Graph<block_end><block_end>
<import_from_future_stmt> absolute_import division print_function with_statement<import_stmt>os<import_from_stmt>turbo.conf app_config<import_from_stmt>turbo.util get_base_dir import_object<def_stmt>_install_app package_space<block_start><for_stmt>app getattr(import_object('apps.settings' package_space) 'INSTALLED_APPS')<block_start>import_object('.'.join(['apps' app]) package_space)<block_end><block_end><def_stmt>register_app app_name app_setting web_application_setting mainfile package_space<block_start>"""register the application: store settings on app_config, configure logging, and install the project's apps """<import_from_stmt>turbo log<line_sep>app_config.app_name=app_name<line_sep>app_config.app_setting=app_setting<line_sep>app_config.project_name=os.path.basename(get_base_dir(mainfile 2))<line_sep>app_config.web_application_setting.update(web_application_setting)<if_stmt>app_setting.get('session_config')<block_start>app_config.session_config.update(app_setting['session_config'])<block_end>log.getLogger(**app_setting.log)<line_sep>_install_app(package_space)<block_end><def_stmt>register_url url handler name=<none> kwargs=<none><block_start>"""insert url into tornado application handlers group :arg str url: url pattern :arg object handler: url mapping handler :arg str name: reverse url name :arg dict kwargs: tornado handler initialize args """<if_stmt>name<is><none><and>kwargs<is><none><block_start>app_config.urls.append((url handler))<line_sep><return><block_end><if_stmt>name<is><none><block_start>app_config.urls.append((url handler kwargs))<line_sep><return><block_end>app_config.urls.append((url handler kwargs name))<block_end><def_stmt>register_group_urls prefix urls<block_start><for_stmt>item urls<block_start>url,handler=item[0:2]<line_sep>register_url(prefix+url handler *item[2:])<block_end><block_end>
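# Hedged usage sketch for the helpers above (HomeHandler, PingHandler,
# UserHandler and the `db` object are hypothetical, not part of this module).
# register_url appends tuples in tornado's URLSpec order (url, handler,
# kwargs, name), and register_group_urls prefixes a common path onto each
# entry:
#
#   register_url('/', HomeHandler)
#   register_url('/user', UserHandler, name='user', kwargs={'db': db})
#   register_group_urls('/api', [
#       ('/ping', PingHandler),
#       ('/user', UserHandler, 'api-user', {'db': db}),   # name, then kwargs
#   ])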
<import_from_stmt>chainer cuda<import_from_stmt>chainer gradient_check<import_stmt>numpy<import_stmt>pytest<import_from_stmt>chainer_chemistry.links.connection.graph_mlp GraphMLP# NOQA in_size=3<line_sep>atom_size=5<line_sep>out_size=4<line_sep>channels=[16 out_size]<line_sep>batch_size=2<line_sep>@pytest.fixture<def_stmt>model <block_start>l=GraphMLP(channels in_channels=in_size)<line_sep>l.cleargrads()<line_sep><return>l<block_end>@pytest.fixture<def_stmt>data <block_start>x_data=numpy.random.uniform(-1 1 (batch_size atom_size in_size)).astype(numpy.float32)<line_sep>y_grad=numpy.random.uniform(-1 1 (batch_size atom_size out_size)).astype(numpy.float32)<line_sep><return>x_data y_grad<block_end><def_stmt>test_forward_cpu model data# only testing shape for now... <block_start>x_data=data[0]<line_sep>y_actual=model(x_data)<assert_stmt>y_actual.shape<eq>(batch_size atom_size out_size)<assert_stmt>len(model.layers)<eq>len(channels)<block_end>@pytest.mark.gpu<def_stmt>test_forward_gpu model data<block_start>x_data=cuda.to_gpu(data[0])<line_sep>model.to_gpu()<line_sep>y_actual=model(x_data)<assert_stmt>y_actual.shape<eq>(batch_size atom_size out_size)<assert_stmt>len(model.layers)<eq>len(channels)<block_end><def_stmt>test_backward_cpu model data<block_start>x_data,y_grad=data<line_sep>gradient_check.check_backward(model x_data y_grad list(model.params()) atol=1e-3 rtol=1e-3)<block_end>@pytest.mark.gpu<def_stmt>test_backward_gpu model data<block_start>x_data,y_grad=[cuda.to_gpu(d)<for>d data]<line_sep>model.to_gpu()<line_sep>gradient_check.check_backward(model x_data y_grad list(model.params()) atol=1e-3 rtol=1e-3)<block_end><if_stmt>__name__<eq>'__main__'<block_start>pytest.main([__file__ '-v' '-s'])<block_end>
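# For orientation: gradient_check.check_backward above validates the model's
# analytic gradients against numerical ones. A self-contained numpy sketch of
# the same idea (central finite differences; illustrative only, not the
# Chainer API):
import numpy

def _numerical_grad(f, x, eps=1e-3):
    grad = numpy.zeros_like(x)
    for i in range(x.size):
        step = numpy.zeros_like(x)
        step.flat[i] = eps
        # central difference approximation of df/dx_i
        grad.flat[i] = (f(x + step) - f(x - step)) / (2 * eps)
    return grad

_x = numpy.random.uniform(-1, 1, (3,))
# d/dx sum(x^2) = 2x, so the numerical estimate should match 2x closely
assert numpy.allclose(_numerical_grad(lambda v: (v ** 2).sum(), _x), 2 * _x, atol=1e-4)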
""" Script for running over all instances of MVMC. Since running the script can take a long time, it is possible to parallelize across different machines using the --index and --skip arguments. Examples: python scripts/mvmc_driver.py --mvmc_path data/mvmc --index 0 --skip 1 """<import_stmt>argparse<import_stmt>os<import_stmt>subprocess<import_from_stmt>tqdm.auto tqdm<def_stmt>get_parser <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--mvmc_path" type=str default="data/mvmc")<line_sep>parser.add_argument("--index" default=0 type=int help="Initial index to start at.")<line_sep>parser.add_argument("--skip" default=1 type=int help="Number of instances to skip at a time.")<line_sep>parser.add_argument("--force" action="store_true" help="Re-run even if output exists.")<line_sep><return>parser<block_end><def_stmt>main args<block_start>instance_ids=sorted(os.listdir(args.mvmc_path))<line_sep>base_cmd=["python" "main.py" "--mvmc" "--symmetrize" "--export-mesh" "--predict-illumination" ]<if_stmt>args.force<block_start>base_cmd.append("--force")<block_end><for_stmt>instance_id tqdm(instance_ids[args.index::args.skip])<block_start>cmd=base_cmd+["--instance-dir" os.path.join(args.mvmc_path instance_id) ]<line_sep>print("Running:" " ".join(cmd))<line_sep>subprocess.call(cmd)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>args=get_parser().parse_args()<line_sep>main(args)<block_end>
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['BillingAccountExclusionArgs' 'BillingAccountExclusion']<line_sep>@pulumi.input_type<class_stmt>BillingAccountExclusionArgs<block_start><def_stmt>__init__ __self__ * billing_account:pulumi.Input[str] filter:pulumi.Input[str] description:Optional[pulumi.Input[str]]=<none> disabled:Optional[pulumi.Input[bool]]=<none> name:Optional[pulumi.Input[str]]=<none><block_start>""" The set of arguments for constructing a BillingAccountExclusion resource. :param pulumi.Input[str] billing_account: The billing account to create the exclusion for. :param pulumi.Input[str] filter: The filter to apply when excluding logs. Only log entries that match the filter are excluded. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to write a filter. :param pulumi.Input[str] description: A human-readable description. :param pulumi.Input[bool] disabled: Whether this exclusion rule should be disabled or not. This defaults to false. :param pulumi.Input[str] name: The name of the logging exclusion. """<line_sep>pulumi.set(__self__ "billing_account" billing_account)<line_sep>pulumi.set(__self__ "filter" filter)<if_stmt>description<is><not><none><block_start>pulumi.set(__self__ "description" description)<block_end><if_stmt>disabled<is><not><none><block_start>pulumi.set(__self__ "disabled" disabled)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><block_end>@[email protected](name="billingAccount")<def_stmt>billing_account self<arrow>pulumi.Input[str]<block_start>""" The billing account to create the exclusion for. """<line_sep><return>pulumi.get(self "billing_account")<block_end>@billing_account.setter<def_stmt>billing_account self value:pulumi.Input[str]<block_start>pulumi.set(self "billing_account" value)<block_end>@[email protected]<def_stmt>filter self<arrow>pulumi.Input[str]<block_start>""" The filter to apply when excluding logs. Only log entries that match the filter are excluded. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to write a filter. """<line_sep><return>pulumi.get(self "filter")<block_end>@filter.setter<def_stmt>filter self value:pulumi.Input[str]<block_start>pulumi.set(self "filter" value)<block_end>@[email protected]<def_stmt>description self<arrow>Optional[pulumi.Input[str]]<block_start>""" A human-readable description. """<line_sep><return>pulumi.get(self "description")<block_end>@description.setter<def_stmt>description self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "description" value)<block_end>@[email protected]<def_stmt>disabled self<arrow>Optional[pulumi.Input[bool]]<block_start>""" Whether this exclusion rule should be disabled or not. This defaults to false. """<line_sep><return>pulumi.get(self "disabled")<block_end>@disabled.setter<def_stmt>disabled self value:Optional[pulumi.Input[bool]]<block_start>pulumi.set(self "disabled" value)<block_end>@[email protected]<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>""" The name of the logging exclusion. 
"""<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end><block_end>@pulumi.input_type<class_stmt>_BillingAccountExclusionState<block_start><def_stmt>__init__ __self__ * billing_account:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> disabled:Optional[pulumi.Input[bool]]=<none> filter:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none><block_start>""" Input properties used for looking up and filtering BillingAccountExclusion resources. :param pulumi.Input[str] billing_account: The billing account to create the exclusion for. :param pulumi.Input[str] description: A human-readable description. :param pulumi.Input[bool] disabled: Whether this exclusion rule should be disabled or not. This defaults to false. :param pulumi.Input[str] filter: The filter to apply when excluding logs. Only log entries that match the filter are excluded. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to write a filter. :param pulumi.Input[str] name: The name of the logging exclusion. """<if_stmt>billing_account<is><not><none><block_start>pulumi.set(__self__ "billing_account" billing_account)<block_end><if_stmt>description<is><not><none><block_start>pulumi.set(__self__ "description" description)<block_end><if_stmt>disabled<is><not><none><block_start>pulumi.set(__self__ "disabled" disabled)<block_end><if_stmt>filter<is><not><none><block_start>pulumi.set(__self__ "filter" filter)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><block_end>@[email protected](name="billingAccount")<def_stmt>billing_account self<arrow>Optional[pulumi.Input[str]]<block_start>""" The billing account to create the exclusion for. """<line_sep><return>pulumi.get(self "billing_account")<block_end>@billing_account.setter<def_stmt>billing_account self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "billing_account" value)<block_end>@[email protected]<def_stmt>description self<arrow>Optional[pulumi.Input[str]]<block_start>""" A human-readable description. """<line_sep><return>pulumi.get(self "description")<block_end>@description.setter<def_stmt>description self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "description" value)<block_end>@[email protected]<def_stmt>disabled self<arrow>Optional[pulumi.Input[bool]]<block_start>""" Whether this exclusion rule should be disabled or not. This defaults to false. """<line_sep><return>pulumi.get(self "disabled")<block_end>@disabled.setter<def_stmt>disabled self value:Optional[pulumi.Input[bool]]<block_start>pulumi.set(self "disabled" value)<block_end>@[email protected]<def_stmt>filter self<arrow>Optional[pulumi.Input[str]]<block_start>""" The filter to apply when excluding logs. Only log entries that match the filter are excluded. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to write a filter. """<line_sep><return>pulumi.get(self "filter")<block_end>@filter.setter<def_stmt>filter self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "filter" value)<block_end>@[email protected]<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>""" The name of the logging exclusion. 
"""<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end><block_end><class_stmt>BillingAccountExclusion(pulumi.CustomResource)<block_start>@overload<def_stmt>__init__ __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> billing_account:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> disabled:Optional[pulumi.Input[bool]]=<none> filter:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start>""" ## Import Billing account logging exclusions can be imported using their URI, e.g. ```sh $ pulumi import gcp:logging/billingAccountExclusion:BillingAccountExclusion my_exclusion billingAccounts/my-billing_account/exclusions/my-exclusion ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] billing_account: The billing account to create the exclusion for. :param pulumi.Input[str] description: A human-readable description. :param pulumi.Input[bool] disabled: Whether this exclusion rule should be disabled or not. This defaults to false. :param pulumi.Input[str] filter: The filter to apply when excluding logs. Only log entries that match the filter are excluded. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to write a filter. :param pulumi.Input[str] name: The name of the logging exclusion. """<line_sep><ellipsis><block_end>@overload<def_stmt>__init__ __self__ resource_name:str args:BillingAccountExclusionArgs opts:Optional[pulumi.ResourceOptions]=<none><block_start>""" ## Import Billing account logging exclusions can be imported using their URI, e.g. ```sh $ pulumi import gcp:logging/billingAccountExclusion:BillingAccountExclusion my_exclusion billingAccounts/my-billing_account/exclusions/my-exclusion ``` :param str resource_name: The name of the resource. :param BillingAccountExclusionArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. 
"""<line_sep><ellipsis><block_end><def_stmt>__init__ __self__ resource_name:str *args **kwargs<block_start>resource_args,opts=_utilities.get_resource_args_opts(BillingAccountExclusionArgs pulumi.ResourceOptions *args **kwargs)<if_stmt>resource_args<is><not><none><block_start>__self__._internal_init(resource_name opts **resource_args.__dict__)<block_end><else_stmt><block_start>__self__._internal_init(resource_name *args **kwargs)<block_end><block_end><def_stmt>_internal_init __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> billing_account:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> disabled:Optional[pulumi.Input[bool]]=<none> filter:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start><if_stmt>opts<is><none><block_start>opts=pulumi.ResourceOptions()<block_end><if_stmt><not>isinstance(opts pulumi.ResourceOptions)<block_start><raise>TypeError('Expected resource options to be a ResourceOptions instance')<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end><if_stmt>opts.id<is><none><block_start><if_stmt>__props__<is><not><none><block_start><raise>TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')<block_end>__props__=BillingAccountExclusionArgs.__new__(BillingAccountExclusionArgs)<if_stmt>billing_account<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'billing_account'")<block_end>__props__.__dict__["billing_account"]=billing_account<line_sep>__props__.__dict__["description"]=description<line_sep>__props__.__dict__["disabled"]=disabled<if_stmt>filter<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'filter'")<block_end>__props__.__dict__["filter"]=filter<line_sep>__props__.__dict__["name"]=name<block_end>super(BillingAccountExclusion __self__).__init__('gcp:logging/billingAccountExclusion:BillingAccountExclusion' resource_name __props__ opts)<block_end>@staticmethod<def_stmt>get resource_name:str id:pulumi.Input[str] opts:Optional[pulumi.ResourceOptions]=<none> billing_account:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> disabled:Optional[pulumi.Input[bool]]=<none> filter:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none><arrow>'BillingAccountExclusion'<block_start>""" Get an existing BillingAccountExclusion resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] billing_account: The billing account to create the exclusion for. :param pulumi.Input[str] description: A human-readable description. :param pulumi.Input[bool] disabled: Whether this exclusion rule should be disabled or not. This defaults to false. :param pulumi.Input[str] filter: The filter to apply when excluding logs. Only log entries that match the filter are excluded. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to write a filter. :param pulumi.Input[str] name: The name of the logging exclusion. 
"""<line_sep>opts=pulumi.ResourceOptions.merge(opts pulumi.ResourceOptions(id=id))<line_sep>__props__=_BillingAccountExclusionState.__new__(_BillingAccountExclusionState)<line_sep>__props__.__dict__["billing_account"]=billing_account<line_sep>__props__.__dict__["description"]=description<line_sep>__props__.__dict__["disabled"]=disabled<line_sep>__props__.__dict__["filter"]=filter<line_sep>__props__.__dict__["name"]=name<line_sep><return>BillingAccountExclusion(resource_name opts=opts __props__=__props__)<block_end>@[email protected](name="billingAccount")<def_stmt>billing_account self<arrow>pulumi.Output[str]<block_start>""" The billing account to create the exclusion for. """<line_sep><return>pulumi.get(self "billing_account")<block_end>@[email protected]<def_stmt>description self<arrow>pulumi.Output[Optional[str]]<block_start>""" A human-readable description. """<line_sep><return>pulumi.get(self "description")<block_end>@[email protected]<def_stmt>disabled self<arrow>pulumi.Output[Optional[bool]]<block_start>""" Whether this exclusion rule should be disabled or not. This defaults to false. """<line_sep><return>pulumi.get(self "disabled")<block_end>@[email protected]<def_stmt>filter self<arrow>pulumi.Output[str]<block_start>""" The filter to apply when excluding logs. Only log entries that match the filter are excluded. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced-filters) for information on how to write a filter. """<line_sep><return>pulumi.get(self "filter")<block_end>@[email protected]<def_stmt>name self<arrow>pulumi.Output[str]<block_start>""" The name of the logging exclusion. """<line_sep><return>pulumi.get(self "name")<block_end><block_end>
expected_output={'switch':{"1":{'system_temperature_state':'ok' }}}<line_sep>
<import_from_stmt>lomond.status Status<def_stmt>test_constants <block_start>expected_constants={'BAD_DATA' 'DATA_NOT_UNDERSTOOD' 'EXTENSION_FAILED' 'GOING_AWAY' 'MESSAGE_TOO_LARGE' 'NORMAL' 'POLICY_VIOLATION' 'PROTOCOL_ERROR' 'UNEXPECTED_CONDITION'}<assert_stmt>expected_constants<eq>set(filter(<lambda>constant:constant.isupper() dir(Status)))<block_end>
<import_stmt>types<import_stmt>pytest<import_from_stmt>plenum.test.helper checkViewNoForNodes sdk_send_random_and_check countDiscarded<import_from_stmt>plenum.test.malicious_behaviors_node slow_primary<import_from_stmt>plenum.test.test_node getPrimaryReplica ensureElectionsDone<import_from_stmt>plenum.test.view_change.helper provoke_and_wait_for_view_change ensure_view_change<import_from_stmt>stp_core.common.log getlogger<line_sep>logger=getlogger()<def_stmt>test_master_primary_different_from_previous txnPoolNodeSet looper sdk_pool_handle sdk_wallet_client<block_start>""" After a view change, the primary of the master instance must be different from the previous primary; for the other instances this does not matter. The primary is benign and does not vote for itself. """<line_sep>pr=slow_primary(txnPoolNodeSet 0 delay=10)<line_sep>old_pr_node_name=pr.node.name<line_sep># View change happens ensure_view_change(looper txnPoolNodeSet)<line_sep>logger.debug("VIEW HAS BEEN CHANGED!")<line_sep># Elections done ensureElectionsDone(looper=looper nodes=txnPoolNodeSet)<line_sep># New primary is not the same as the old primary <assert_stmt>getPrimaryReplica(txnPoolNodeSet 0).node.name<ne>old_pr_node_name<line_sep>pr.outBoxTestStasher.resetDelays()<line_sep># The new primary can still process requests sdk_send_random_and_check(looper txnPoolNodeSet sdk_pool_handle sdk_wallet_client 5)<block_end>
# -*- coding: utf-8 -*- VOICE_DATA=[{"Id":"Joanna" "LanguageCode":"en-US" "LanguageName":"US English" "Gender":"Female" "Name":"Joanna" } {"Id":"Mizuki" "LanguageCode":"ja-JP" "LanguageName":"Japanese" "Gender":"Female" "Name":"Mizuki" } {"Id":"Filiz" "LanguageCode":"tr-TR" "LanguageName":"Turkish" "Gender":"Female" "Name":"Filiz" } {"Id":"Astrid" "LanguageCode":"sv-SE" "LanguageName":"Swedish" "Gender":"Female" "Name":"Astrid" } {"Id":"Tatyana" "LanguageCode":"ru-RU" "LanguageName":"Russian" "Gender":"Female" "Name":"Tatyana" } {"Id":"Maxim" "LanguageCode":"ru-RU" "LanguageName":"Russian" "Gender":"Male" "Name":"Maxim" } {"Id":"Carmen" "LanguageCode":"ro-RO" "LanguageName":"Romanian" "Gender":"Female" "Name":"Carmen" } {"Id":"Ines" "LanguageCode":"pt-PT" "LanguageName":"Portuguese" "Gender":"Female" "Name":"Inês" } {"Id":"Cristiano" "LanguageCode":"pt-PT" "LanguageName":"Portuguese" "Gender":"Male" "Name":"Cristiano" } {"Id":"Vitoria" "LanguageCode":"pt-BR" "LanguageName":"Brazilian Portuguese" "Gender":"Female" "Name":"Vitória" } {"Id":"Ricardo" "LanguageCode":"pt-BR" "LanguageName":"Brazilian Portuguese" "Gender":"Male" "Name":"Ricardo" } {"Id":"Maja" "LanguageCode":"pl-PL" "LanguageName":"Polish" "Gender":"Female" "Name":"Maja" } {"Id":"Jan" "LanguageCode":"pl-PL" "LanguageName":"Polish" "Gender":"Male" "Name":"Jan" } {"Id":"Ewa" "LanguageCode":"pl-PL" "LanguageName":"Polish" "Gender":"Female" "Name":"Ewa" } {"Id":"Ruben" "LanguageCode":"nl-NL" "LanguageName":"Dutch" "Gender":"Male" "Name":"Ruben" } {"Id":"Lotte" "LanguageCode":"nl-NL" "LanguageName":"Dutch" "Gender":"Female" "Name":"Lotte" } {"Id":"Liv" "LanguageCode":"nb-NO" "LanguageName":"Norwegian" "Gender":"Female" "Name":"Liv" } {"Id":"Giorgio" "LanguageCode":"it-IT" "LanguageName":"Italian" "Gender":"Male" "Name":"Giorgio" } {"Id":"Carla" "LanguageCode":"it-IT" "LanguageName":"Italian" "Gender":"Female" "Name":"Carla" } {"Id":"Karl" "LanguageCode":"is-IS" "LanguageName":"Icelandic" "Gender":"Male" "Name":"Karl" } {"Id":"Dora" "LanguageCode":"is-IS" "LanguageName":"Icelandic" "Gender":"Female" "Name":"Dóra" } {"Id":"Mathieu" "LanguageCode":"fr-FR" "LanguageName":"French" "Gender":"Male" "Name":"Mathieu" } {"Id":"Celine" "LanguageCode":"fr-FR" "LanguageName":"French" "Gender":"Female" "Name":"Céline" } {"Id":"Chantal" "LanguageCode":"fr-CA" "LanguageName":"Canadian French" "Gender":"Female" "Name":"Chantal" } {"Id":"Penelope" "LanguageCode":"es-US" "LanguageName":"US Spanish" "Gender":"Female" "Name":"Penélope" } {"Id":"Miguel" "LanguageCode":"es-US" "LanguageName":"US Spanish" "Gender":"Male" "Name":"Miguel" } {"Id":"Enrique" "LanguageCode":"es-ES" "LanguageName":"Castilian Spanish" "Gender":"Male" "Name":"Enrique" } {"Id":"Conchita" "LanguageCode":"es-ES" "LanguageName":"Castilian Spanish" "Gender":"Female" "Name":"Conchita" } {"Id":"Geraint" "LanguageCode":"en-GB-WLS" "LanguageName":"Welsh English" "Gender":"Male" "Name":"Geraint" } {"Id":"Salli" "LanguageCode":"en-US" "LanguageName":"US English" "Gender":"Female" "Name":"Salli" } {"Id":"Kimberly" "LanguageCode":"en-US" "LanguageName":"US English" "Gender":"Female" "Name":"Kimberly" } {"Id":"Kendra" "LanguageCode":"en-US" "LanguageName":"US English" "Gender":"Female" "Name":"Kendra" } {"Id":"Justin" "LanguageCode":"en-US" "LanguageName":"US English" "Gender":"Male" "Name":"Justin" } {"Id":"Joey" "LanguageCode":"en-US" "LanguageName":"US English" "Gender":"Male" "Name":"Joey" } {"Id":"Ivy" "LanguageCode":"en-US" "LanguageName":"US English" "Gender":"Female" "Name":"Ivy" } 
{"Id":"Raveena" "LanguageCode":"en-IN" "LanguageName":"Indian English" "Gender":"Female" "Name":"Raveena" } {"Id":"Emma" "LanguageCode":"en-GB" "LanguageName":"British English" "Gender":"Female" "Name":"Emma" } {"Id":"Brian" "LanguageCode":"en-GB" "LanguageName":"British English" "Gender":"Male" "Name":"Brian" } {"Id":"Amy" "LanguageCode":"en-GB" "LanguageName":"British English" "Gender":"Female" "Name":"Amy" } {"Id":"Russell" "LanguageCode":"en-AU" "LanguageName":"Australian English" "Gender":"Male" "Name":"Russell" } {"Id":"Nicole" "LanguageCode":"en-AU" "LanguageName":"Australian English" "Gender":"Female" "Name":"Nicole" } {"Id":"Vicki" "LanguageCode":"de-DE" "LanguageName":"German" "Gender":"Female" "Name":"Vicki" } {"Id":"Marlene" "LanguageCode":"de-DE" "LanguageName":"German" "Gender":"Female" "Name":"Marlene" } {"Id":"Hans" "LanguageCode":"de-DE" "LanguageName":"German" "Gender":"Male" "Name":"Hans" } {"Id":"Naja" "LanguageCode":"da-DK" "LanguageName":"Danish" "Gender":"Female" "Name":"Naja" } {"Id":"Mads" "LanguageCode":"da-DK" "LanguageName":"Danish" "Gender":"Male" "Name":"Mads" } {"Id":"Gwyneth" "LanguageCode":"cy-GB" "LanguageName":"Welsh" "Gender":"Female" "Name":"Gwyneth" } {"Id":"Jacek" "LanguageCode":"pl-PL" "LanguageName":"Polish" "Gender":"Male" "Name":"Jacek" } ]<line_sep># {...} is also shorthand set syntax LANGUAGE_CODES={"cy-GB" "da-DK" "de-DE" "en-AU" "en-GB" "en-GB-WLS" "en-IN" "en-US" "es-ES" "es-US" "fr-CA" "fr-FR" "is-IS" "it-IT" "ja-JP" "nb-NO" "nl-NL" "pl-PL" "pt-BR" "pt-PT" "ro-RO" "ru-RU" "sv-SE" "tr-TR" }<line_sep>VOICE_IDS={"Geraint" "Gwyneth" "Mads" "Naja" "Hans" "Marlene" "Nicole" "Russell" "Amy" "Brian" "Emma" "Raveena" "Ivy" "Joanna" "Joey" "Justin" "Kendra" "Kimberly" "Salli" "Conchita" "Enrique" "Miguel" "Penelope" "Chantal" "Celine" "Mathieu" "Dora" "Karl" "Carla" "Giorgio" "Mizuki" "Liv" "Lotte" "Ruben" "Ewa" "Jacek" "Jan" "Maja" "Ricardo" "Vitoria" "Cristiano" "Ines" "Carmen" "Maxim" "Tatyana" "Astrid" "Filiz" }<line_sep>
# Generated by Django 3.2.6 on 2021-09-17 10:52 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("corporate" "0013_alter_zulipsponsorshiprequest_org_website") ]<line_sep>operations=[migrations.AddField(model_name="customerplan" name="end_date" field=models.DateTimeField(null=<true>) ) ]<block_end>
# Zed Attack Proxy (ZAP) and its related class files. # # ZAP is an HTTP/HTTPS proxy for assessing web application security. # # Copyright 2017 the ZAP development team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This file was automatically generated. """<import_stmt>six<class_stmt>ajaxSpider(object)<block_start><def_stmt>__init__ self zap<block_start>self.zap=zap<block_end>@property<def_stmt>allowed_resources self<block_start>""" Gets the allowed resources. The allowed resources are always fetched even if out of scope, allowing to include necessary resources (e.g. scripts) from 3rd-parties. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/allowedResources/')))<block_end>@property<def_stmt>status self<block_start>""" Gets the current status of the crawler. Actual values are Stopped and Running. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/status/')))<block_end><def_stmt>results self start=<none> count=<none><block_start>""" Gets the current results of the crawler. This component is optional and therefore the API will only work if it is installed """<line_sep>params={}<if_stmt>start<is><not><none><block_start>params['start']=start<block_end><if_stmt>count<is><not><none><block_start>params['count']=count<block_end><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/results/' params)))<block_end>@property<def_stmt>number_of_results self<block_start>""" Gets the number of resources found. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/numberOfResults/')))<block_end>@property<def_stmt>full_results self<block_start>""" Gets the full crawled content detected by the AJAX Spider. Returns a set of values based on 'inScope' URLs, 'outOfScope' URLs, and 'errors' encountered during the last/current run of the AJAX Spider. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/fullResults/')))<block_end>@property<def_stmt>option_browser_id self<block_start>""" Gets the configured browser to use for crawling. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/optionBrowserId/')))<block_end>@property<def_stmt>option_event_wait self<block_start>""" Gets the time to wait after an event (in milliseconds). For example: the wait delay after the cursor hovers over an element, in order for a menu to display, etc. 
This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/optionEventWait/')))<block_end>@property<def_stmt>option_max_crawl_depth self<block_start>""" Gets the configured value for the max crawl depth. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/optionMaxCrawlDepth/')))<block_end>@property<def_stmt>option_max_crawl_states self<block_start>""" Gets the configured value for the maximum crawl states allowed. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/optionMaxCrawlStates/')))<block_end>@property<def_stmt>option_max_duration self<block_start>""" Gets the configured max duration of the crawl, the value is in minutes. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/optionMaxDuration/')))<block_end>@property<def_stmt>option_number_of_browsers self<block_start>""" Gets the configured number of browsers to be used. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/optionNumberOfBrowsers/')))<block_end>@property<def_stmt>option_reload_wait self<block_start>""" Gets the configured time to wait after reloading the page, this value is in milliseconds. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/optionReloadWait/')))<block_end>@property<def_stmt>option_click_default_elems self<block_start>""" Gets the configured value for 'Click Default Elements Only', HTML elements such as 'a', 'button', 'input', all associated with some action or links on the page. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/optionClickDefaultElems/')))<block_end>@property<def_stmt>option_click_elems_once self<block_start>""" Gets the value configured for the AJAX Spider to know if it should click on the elements only once. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/optionClickElemsOnce/')))<block_end>@property<def_stmt>option_random_inputs self<block_start>""" Gets if the AJAX Spider will use random values in form fields when crawling, if set to true. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/view/optionRandomInputs/')))<block_end><def_stmt>scan self url=<none> inscope=<none> contextname=<none> subtreeonly=<none> apikey=''<block_start>""" Runs the AJAX Spider against a given target. 
This component is optional and therefore the API will only work if it is installed """<line_sep>params={'apikey':apikey}<if_stmt>url<is><not><none><block_start>params['url']=url<block_end><if_stmt>inscope<is><not><none><block_start>params['inScope']=inscope<block_end><if_stmt>contextname<is><not><none><block_start>params['contextName']=contextname<block_end><if_stmt>subtreeonly<is><not><none><block_start>params['subtreeOnly']=subtreeonly<block_end><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/scan/' params)))<block_end><def_stmt>scan_as_user self contextname username url=<none> subtreeonly=<none> apikey=''<block_start>""" Runs the AJAX Spider from the perspective of a User of the web application. This component is optional and therefore the API will only work if it is installed """<line_sep>params={'contextName':contextname 'userName':username 'apikey':apikey}<if_stmt>url<is><not><none><block_start>params['url']=url<block_end><if_stmt>subtreeonly<is><not><none><block_start>params['subtreeOnly']=subtreeonly<block_end><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/scanAsUser/' params)))<block_end><def_stmt>stop self apikey=''<block_start>""" Stops the AJAX Spider. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/stop/' {'apikey':apikey})))<block_end><def_stmt>add_allowed_resource self regex enabled=<none> apikey=''<block_start>""" Adds an allowed resource. This component is optional and therefore the API will only work if it is installed """<line_sep>params={'regex':regex 'apikey':apikey}<if_stmt>enabled<is><not><none><block_start>params['enabled']=enabled<block_end><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/addAllowedResource/' params)))<block_end><def_stmt>remove_allowed_resource self regex apikey=''<block_start>""" Removes an allowed resource. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/removeAllowedResource/' {'regex':regex 'apikey':apikey})))<block_end><def_stmt>set_enabled_allowed_resource self regex enabled apikey=''<block_start>""" Sets whether or not an allowed resource is enabled. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/setEnabledAllowedResource/' {'regex':regex 'enabled':enabled 'apikey':apikey})))<block_end><def_stmt>set_option_browser_id self string apikey=''<block_start>""" Sets the configuration of the AJAX Spider to use one of the supported browsers. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/setOptionBrowserId/' {'String':string 'apikey':apikey})))<block_end><def_stmt>set_option_click_default_elems self boolean apikey=''<block_start>""" Sets whether or not the AJAX Spider will only click on the default HTML elements. 
This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/setOptionClickDefaultElems/' {'Boolean':boolean 'apikey':apikey})))<block_end><def_stmt>set_option_click_elems_once self boolean apikey=''<block_start>""" When enabled, the crawler attempts to interact with each element (e.g., by clicking) only once. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/setOptionClickElemsOnce/' {'Boolean':boolean 'apikey':apikey})))<block_end><def_stmt>set_option_event_wait self integer apikey=''<block_start>""" Sets the time to wait after an event (in milliseconds). For example: the wait delay after the cursor hovers over an element, in order for a menu to display, etc. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/setOptionEventWait/' {'Integer':integer 'apikey':apikey})))<block_end><def_stmt>set_option_max_crawl_depth self integer apikey=''<block_start>""" Sets the maximum depth that the crawler can reach. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/setOptionMaxCrawlDepth/' {'Integer':integer 'apikey':apikey})))<block_end><def_stmt>set_option_max_crawl_states self integer apikey=''<block_start>""" Sets the maximum number of states that the crawler should crawl. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/setOptionMaxCrawlStates/' {'Integer':integer 'apikey':apikey})))<block_end><def_stmt>set_option_max_duration self integer apikey=''<block_start>""" The maximum time that the crawler is allowed to run. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/setOptionMaxDuration/' {'Integer':integer 'apikey':apikey})))<block_end><def_stmt>set_option_number_of_browsers self integer apikey=''<block_start>""" Sets the number of windows to be used by AJAX Spider. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/setOptionNumberOfBrowsers/' {'Integer':integer 'apikey':apikey})))<block_end><def_stmt>set_option_random_inputs self boolean apikey=''<block_start>""" When enabled, inserts random values into form fields. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/setOptionRandomInputs/' {'Boolean':boolean 'apikey':apikey})))<block_end><def_stmt>set_option_reload_wait self integer apikey=''<block_start>""" Sets the time to wait after the page is loaded before interacting with it. This component is optional and therefore the API will only work if it is installed """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'ajaxSpider/action/setOptionReloadWait/' {'Integer':integer 'apikey':apikey})))<block_end><block_end>
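# Hedged end-to-end sketch of driving this optional add-on through the zapv2
# client. The host, port, and API key are assumptions, and a ZAP daemon with
# the AJAX Spider add-on must already be running; the status strings follow
# the `status` docstring above.
import time
from zapv2 import ZAPv2

def run_ajax_spider(target, apikey='changeme'):
    proxy = 'http://127.0.0.1:8080'
    zap = ZAPv2(apikey=apikey, proxies={'http': proxy, 'https': proxy})
    zap.ajaxSpider.scan(url=target)                  # start the crawl
    while zap.ajaxSpider.status.lower() == 'running':
        time.sleep(2)                                # poll until it stops
    n = zap.ajaxSpider.number_of_results
    return zap.ajaxSpider.results(start=0, count=n)  # fetch everything found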
<import_from_stmt>plugin.core.environment translate<as>_<import_from_stmt>plugin.managers.account TraktAccountManager<import_from_stmt>plugin.models TraktAccount<import_from_stmt>plugin.preferences.options.core.base SimpleOption<import_stmt>logging<line_sep>log=logging.getLogger(__name__)<class_stmt>PinOption(SimpleOption)<block_start>type='string'<line_sep>group=(_('Authentication') )<line_sep>label=_('Authentication PIN')<line_sep>preference='pin'<def_stmt>on_database_changed self value account=<none># Update preference <block_start><return>self._update_preference(value account)<block_end><def_stmt>on_plex_changed self value account<block_start><if_stmt><not>value# Ignore empty PIN field <block_start><return><none><block_end># Retrieve administrator account trakt_account=TraktAccountManager.get(TraktAccount.account<eq>account)<line_sep># Update administrator authorization <if_stmt><not>TraktAccountManager.update.from_pin(trakt_account value)<block_start>log.warn('Unable to update account')<line_sep><return><none><block_end><return>value<block_end><block_end>
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for a simple convnet with clusterable layer on the MNIST dataset."""<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow_model_optimization.python.core.clustering.keras cluster<import_from_stmt>tensorflow_model_optimization.python.core.clustering.keras cluster_config<import_from_stmt>tensorflow_model_optimization.python.core.clustering.keras clusterable_layer<import_from_stmt>tensorflow_model_optimization.python.core.clustering.keras clustering_algorithm<line_sep>tf.random.set_seed(42)<line_sep>keras=tf.keras<line_sep>EPOCHS=7<line_sep>EPOCHS_FINE_TUNING=4<line_sep>NUMBER_OF_CLUSTERS=8<class_stmt>MyDenseLayer(keras.layers.Dense clusterable_layer.ClusterableLayer)<block_start><def_stmt>get_clusterable_weights self# Cluster kernel and bias. <block_start><return>[('kernel' self.kernel) ('bias' self.bias)]<block_end><block_end><class_stmt>ClusterableWeightsCA(clustering_algorithm.ClusteringAlgorithm)<block_start>"""This class provides a special lookup function for the weights 'w'. It reshapes and tiles centroids the same way as the weights. This allows us to find pulling indices efficiently. """<def_stmt>get_pulling_indices self weight<block_start>clst_num=self.cluster_centroids.shape[0]<line_sep>tiled_weights=tf.tile(tf.expand_dims(weight axis=2) [1 1 clst_num])<line_sep>tiled_cluster_centroids=tf.tile(tf.reshape(self.cluster_centroids [1 1 clst_num]) [weight.shape[0] weight.shape[1] 1])<line_sep># We find the nearest cluster centroids and store them so that ops can build # their kernels upon it pulling_indices=tf.argmin(tf.abs(tiled_weights-tiled_cluster_centroids) axis=2)<line_sep><return>pulling_indices<block_end><block_end><class_stmt>MyClusterableLayer(keras.layers.Layer clusterable_layer.ClusterableLayer)<block_start><def_stmt>__init__ self units=32 **kwargs<block_start>super(MyClusterableLayer self).__init__(**kwargs)<line_sep>self.units=units<block_end><def_stmt>build self input_shape<block_start>self.w=self.add_weight(shape=(input_shape[-1] self.units) initializer='random_normal' trainable=<true> )<line_sep>self.b=self.add_weight(shape=(self.units ) initializer='random_normal' trainable=<false> )<line_sep>self.built=<true><block_end><def_stmt>call self inputs<block_start><return>tf.matmul(inputs self.w)+self.b<block_end><def_stmt>get_config self<block_start>config=super(MyClusterableLayer self).get_config()<line_sep>config.update({'units':self.units})<line_sep><return>config<block_end><def_stmt>get_clusterable_weights self# Cluster only weights 'w' <block_start><return>[('w' self.w)]<block_end><def_stmt>get_clusterable_algorithm self weight_name<block_start>"""Returns clustering algorithm for the custom weights 'w'."""<if_stmt>weight_name<eq>'w'<block_start><return>ClusterableWeightsCA<block_end><else_stmt># We don't cluster other weights. 
<block_start><return><none><block_end><block_end><block_end><def_stmt>_build_model <block_start>"""Builds model with MyDenseLayer."""<line_sep>i=tf.keras.layers.Input(shape=(28 28) name='input')<line_sep>x=tf.keras.layers.Reshape((28 28 1))(i)<line_sep>x=tf.keras.layers.Conv2D(filters=12 kernel_size=(3 3) activation='relu' name='conv1')(x)<line_sep>x=tf.keras.layers.MaxPool2D(2 2)(x)<line_sep>x=tf.keras.layers.Flatten()(x)<line_sep>output=MyDenseLayer(units=10)(x)<line_sep>model=tf.keras.Model(inputs=[i] outputs=[output])<line_sep><return>model<block_end><def_stmt>_build_model_2 <block_start>"""Builds model with MyClusterableLayer layer."""<line_sep>i=tf.keras.layers.Input(shape=(28 28) name='input')<line_sep>x=tf.keras.layers.Reshape((28 28 1))(i)<line_sep>x=tf.keras.layers.Conv2D(filters=12 kernel_size=(3 3) activation='relu' name='conv1')(x)<line_sep>x=tf.keras.layers.MaxPool2D(2 2)(x)<line_sep>x=tf.keras.layers.Flatten()(x)<line_sep>output=MyClusterableLayer(units=10)(x)<line_sep>model=tf.keras.Model(inputs=[i] outputs=[output])<line_sep><return>model<block_end><def_stmt>_get_dataset <block_start>mnist=tf.keras.datasets.mnist<line_sep>(x_train y_train),(x_test y_test)=mnist.load_data()<line_sep>x_train,x_test=x_train/255.0 x_test/255.0<line_sep># Use a subset of the 60000 examples to keep the unit test fast. x_train=x_train[0:1000]<line_sep>y_train=y_train[0:1000]<line_sep><return>(x_train y_train) (x_test y_test)<block_end><def_stmt>_train_model model<block_start>loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=<true>)<line_sep>model.compile(optimizer='adam' loss=loss_fn metrics=['accuracy'])<line_sep>(x_train y_train),_=_get_dataset()<line_sep>model.fit(x_train y_train epochs=EPOCHS)<block_end><def_stmt>_cluster_model model number_of_clusters<block_start>(x_train y_train),_=_get_dataset()<line_sep>clustering_params={'number_of_clusters':number_of_clusters 'cluster_centroids_init':cluster_config.CentroidInitialization.DENSITY_BASED}<line_sep># Cluster model clustered_model=cluster.cluster_weights(model **clustering_params)<line_sep># Use smaller learning rate for fine-tuning # clustered model opt=tf.keras.optimizers.Adam(learning_rate=1e-5)<line_sep>clustered_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=<true>) optimizer=opt metrics=['accuracy'])<line_sep># Fine-tune clustered model clustered_model.fit(x_train y_train epochs=EPOCHS_FINE_TUNING)<line_sep>stripped_model=cluster.strip_clustering(clustered_model)<line_sep>stripped_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=<true>) optimizer=opt metrics=['accuracy'])<line_sep><return>stripped_model<block_end><def_stmt>_get_number_of_unique_weights stripped_model layer_nr weight_name<block_start>layer=stripped_model.layers[layer_nr]<line_sep>weight=getattr(layer weight_name)<line_sep>weights_as_list=weight.numpy().reshape(-1 ).tolist()<line_sep>nr_of_unique_weights=len(set(weights_as_list))<line_sep><return>nr_of_unique_weights<block_end><class_stmt>FunctionalTest(tf.test.TestCase)<block_start><def_stmt>testMnistMyDenseLayer self<block_start>"""Test model with a custom clusterable layer derived from Dense. This clusterable layer (see MyDenseLayer definition above) provides the function get_clusterable_weights() so that both the 'kernel' and the 'bias' weights are clustered. 
"""<line_sep>model=_build_model()<line_sep>_train_model(model)<line_sep># Checks that number of original weights('kernel') is greater than # the number of clusters nr_of_unique_weights=_get_number_of_unique_weights(model -1 'kernel')<line_sep>self.assertGreater(nr_of_unique_weights NUMBER_OF_CLUSTERS)<line_sep># Checks that number of original weights('bias') is greater than # the number of clusters nr_of_unique_weights=_get_number_of_unique_weights(model -1 'bias')<line_sep>self.assertGreater(nr_of_unique_weights NUMBER_OF_CLUSTERS)<line_sep>_,(x_test y_test)=_get_dataset()<line_sep>results_original=model.evaluate(x_test y_test)<line_sep>self.assertGreater(results_original[1] 0.8)<line_sep>clustered_model=_cluster_model(model NUMBER_OF_CLUSTERS)<line_sep>results=clustered_model.evaluate(x_test y_test)<line_sep>self.assertGreater(results[1] 0.8)<line_sep># checks 'kernel' weights of the last layer: MyDenseLayer nr_of_unique_weights=_get_number_of_unique_weights(clustered_model -1 'kernel')<line_sep>self.assertLessEqual(nr_of_unique_weights NUMBER_OF_CLUSTERS)<line_sep># checks 'bias' weights of the last layer: MyDenseLayer nr_of_unique_weights=_get_number_of_unique_weights(clustered_model -1 'bias')<line_sep>self.assertLessEqual(nr_of_unique_weights NUMBER_OF_CLUSTERS)<block_end><def_stmt>testMnistClusterableLayer self<block_start>"""Test keras custom layer. We test the keras custom layer with the provided clustering algorithm (see MyClusterableLayer above). We cluster only 'w' weights and the class ClusterableWeightsCA provides the function get_pulling_indices for the layer-out of 'w' weights. We skip evaluation in this test as it takes some time. """<line_sep>model=_build_model_2()<line_sep>_train_model(model)<line_sep># Checks that number of original weights 'w' is greater than # the number of clusters. nr_of_unique_weights=_get_number_of_unique_weights(model -1 'w')<line_sep>self.assertGreater(nr_of_unique_weights NUMBER_OF_CLUSTERS)<line_sep>clustered_model=_cluster_model(model NUMBER_OF_CLUSTERS)<line_sep># Checks clustered weights 'w'. nr_of_unique_weights=_get_number_of_unique_weights(clustered_model -1 'w')<line_sep>self.assertLessEqual(nr_of_unique_weights NUMBER_OF_CLUSTERS)<line_sep># Train again normally for sanity check _train_model(clustered_model)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
""" Port to Version 2 with implementation of v2-specific changes. Uses: ESMValTool v2, Python3.x <NAME>, UREAD, July 2018 Porting replicates the functionality to minimum errors. Original Description from Version 1 Diagnostic: ;;########################################################################### ;; AutoAssess_radiation_rms.py ;;########################################################################### ;; Description ;; This script is the RMS error metric script of ;; AutoAssess radiation ;; ########################################################################### This diagnostic uses CMIP5 data; to switch to CMIP6 change _CMIP_TYPE. """<import_stmt>os<import_stmt>logging<import_stmt>iris<import_from_stmt>esmvaltool.diag_scripts.autoassess._rms_radiation start end calc_all <import_from_stmt>esmvaltool.diag_scripts.autoassess._valmod_radiation perform_equation <import_from_stmt>esmvaltool.diag_scripts.shared group_metadata run_diagnostic get_control_exper_obs apply_supermeans <line_sep>logger=logging.getLogger(os.path.basename(__file__))<line_sep>_CMIP_TYPE='CMIP5'<def_stmt>apply_rms data_1 data_2 cfg component_dict var_name<block_start>"""Compute RMS for any data1-2 combination."""<line_sep>data_names=[model['dataset']<for>model component_dict.values()]<line_sep>plot_title=var_name+': '+data_names[0]+' vs '+data_names[1]<line_sep>rms_list=start(data_names[0] data_names[1])<line_sep>analysis_type=cfg['analysis_type']<line_sep>landsea_mask_file=os.path.join(os.path.dirname(__file__) 'autoassess_source' cfg['landsea_mask'])<line_sep>landsea_mask_cube=iris.load_cube(landsea_mask_file)<line_sep>data1_vs_data2=perform_equation(data_1 data_2 analysis_type)<line_sep># call to rms.calc_all() to compute rms; rms.end() to write results calc_all(rms_list data1_vs_data2 landsea_mask_cube plot_title)<line_sep>end(rms_list cfg['work_dir'])<block_end><def_stmt>do_preamble cfg<block_start>"""Execute some preamble functionality."""<line_sep># get data input_data=cfg['input_data'].values()<line_sep>grouped_input_data=group_metadata(input_data 'short_name' sort='dataset')<line_sep><return>input_data grouped_input_data<block_end><def_stmt>main cfg<block_start>"""Execute the radiation rms diag."""<line_sep>logger.setLevel(cfg['log_level'].upper())<line_sep>input_data,grouped_input_data=do_preamble(cfg)<line_sep># select variables and their corresponding # obs files <for_stmt>short_name grouped_input_data<block_start>logger.info("Processing variable %s" short_name)<line_sep># control, experiment and obs's ctrl,exper,obslist=get_control_exper_obs(short_name input_data cfg _CMIP_TYPE)<line_sep># apply the supermeans ctrl_sm,exper_sm,obs_sm_list=apply_supermeans(ctrl exper obslist)<line_sep># assemble a dict that contains various params depending # on the data combinations for RMS computations # control-experiment data_component_dict={'ct-ex':{'ctrl':ctrl 'exper':exper}}<line_sep>logger.info("Computing CONTROL-EXPERIMENT RMS...")<line_sep>apply_rms(ctrl_sm exper_sm cfg data_component_dict['ct-ex'] short_name)<if_stmt>obs_sm_list<block_start><for_stmt>obs,obsfile zip(obs_sm_list obslist)<block_start>data_component_dict={'ct-obs':{'ctrl':ctrl 'obs':obsfile} 'ex-obs':{'exper':exper 'obs':obsfile}}<line_sep># ctrl-obs logger.info("Computing CONTROL-OBS RMS...")<line_sep>apply_rms(ctrl_sm obs cfg data_component_dict['ct-obs'] short_name)<line_sep># exper-obs logger.info("Computing EXPERIMENT-OBS RMS...")<line_sep>apply_rms(exper_sm obs cfg data_component_dict['ex-obs'] 
short_name)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><with_stmt>run_diagnostic()<as>config<block_start>main(config)<block_end><block_end>
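# The RMS helpers (_rms_radiation.start/calc_all/end) are not shown in this
# file; as a rough illustration of the metric being accumulated, an
# area-weighted RMS of a difference field might look like this in plain numpy
# (an assumption about the metric's shape, not the AutoAssess implementation):
import numpy as np

def weighted_rms(diff, weights):
    # square root of the weighted mean of the squared differences
    return np.sqrt(np.sum(weights * diff ** 2) / np.sum(weights))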
""" Module cannot be called tool_shed, because this conflicts with lib/tool_shed also at top level of path. """<line_sep>
<import_from_stmt>.scale_x_continuous scale_x_continuous<as>scale_x_discrete<line_sep>
"""Library implementing the data augmentations. """<import_stmt>numpy<as>np<import_stmt>skimage.io<import_stmt>skimage.transform<import_from_stmt>custom_warnings deprecated<line_sep>tform_identity=skimage.transform.AffineTransform()<line_sep>NO_AUGMENT_PARAMS={"zoom_x":1.0 "zoom_y":1.0 "rotate":0.0 "shear":0.0 "skew_x":0.0 "skew_y":0.0 "translate_x":0.0 "translate_y":0.0 "flip_vert":0.0 "roll_time":0.0 "flip_time":0.0 "change_brightness":0.0 }<def_stmt>resize_to_make_it_fit images output_shape=(50 50)<block_start>"""Resizes the images to a given shape. """<line_sep>max_time=max(images[i].shape[0]<for>i xrange(len(images)))<line_sep>final_shape=(len(images) max_time)+output_shape<line_sep>result=np.zeros(final_shape dtype="float32")<line_sep>volume_change=[]<line_sep>#result.reshape((final_shape[0],-1) + output_shape) <for_stmt>i,mri_slice enumerate(images)<block_start>mri_slice=mri_slice.reshape((-1 )+mri_slice.shape[-2:])<line_sep>scaling=max(mri_slice[0].shape[-2]/output_shape[-2] mri_slice[0].shape[-1]/output_shape[-1])<line_sep>tform=build_rescale_transform(scaling mri_slice[0].shape[-2:] target_shape=output_shape)<for_stmt>j,frame enumerate(mri_slice)# TODO: can't this be done better? <block_start>result[i j]=fast_warp(frame tform output_shape=output_shape)<block_end>A=tform.params[:2 :2]<line_sep>volume_change.append(np.linalg.norm(A[: 0])<times>np.linalg.norm(A[: 1]))<assert_stmt>tform.params[2 2]<eq>1 (tform.params[2 2] )<block_end>#result.reshape(final_shape) <return>result volume_change<block_end>@deprecated<def_stmt>normscale_resize_and_augment slices output_shape=(50 50) augment=<none> pixel_spacing=(1 1) shift_center=(.4 .5) normalised_patch_size=(200 200)<block_start>"""Normalizes the scale, augments, and crops the image. WARNING: This function contains bugs. We kept it around to ensure older models would still behave in the same way. Use normscale_resize_and_augment_2 instead. """<if_stmt><not>pixel_spacing[0]<eq>pixel_spacing[1]<block_start><raise>NotImplementedError("Only supports square pixels")<block_end># No augmentation: <if_stmt>augment<is><none><block_start>augment=NO_AUGMENT_PARAMS<block_end>current_shape=slices[0].shape[-2:]<line_sep>normalised_shape=tuple(int(float(d)<times>ps)<for>d,ps zip(current_shape pixel_spacing))<line_sep>max_time=max(slices[i].shape[0]<for>i xrange(len(slices)))<line_sep>final_shape=(len(slices) max_time)+output_shape<line_sep>result=np.zeros(final_shape dtype="float32")<for_stmt>i,mri_slice enumerate(slices)# For each slice, build a transformation that extracts the right patch, # and augments the data. 
# First, we scale the images such that they all have the same scale <block_start>norm_rescaling=1./pixel_spacing[0]<line_sep>tform_normscale=build_rescale_transform(norm_rescaling mri_slice[0].shape[-2:] target_shape=normalised_shape)<line_sep># Next, we shift the center of the image to the left (assumes upside_up normalisation) tform_shift_center,tform_shift_uncenter=(build_shift_center_transform(normalised_shape shift_center normalised_patch_size))<line_sep># zooming is OK augment_tform=build_augmentation_transform(**augment)<line_sep>patch_scale=max(normalised_patch_size[0]/output_shape[0] normalised_patch_size[1]/output_shape[1])<line_sep>tform_patch_scale=build_rescale_transform(patch_scale normalised_patch_size target_shape=output_shape)<line_sep># x and y axis transform total_tform=tform_patch_scale+tform_shift_uncenter+augment_tform+tform_shift_center+tform_normscale<line_sep># Time axis transform t_map=range(mri_slice.shape[0])<if_stmt>"roll_time"<in>augment<block_start>t_map=np.roll(t_map int(np.floor(augment["roll_time"])))<block_end><if_stmt>"flip_time"<in>augment<and>augment["flip_time"]<g>0.5<block_start>t_map=t_map[::-1]<block_end><for_stmt>j,frame enumerate(mri_slice)<block_start>j_shifted=t_map[j]<line_sep>result[i j_shifted]=fast_warp(frame total_tform output_shape=output_shape)<block_end><block_end><return>result<block_end>NRMSC_DEFAULT_SHIFT_CENTER=(.4 .5)<def_stmt>normscale_resize_and_augment_2 slices output_shape=(50 50) augment=<none> pixel_spacing=(1 1) shift_center=(<none> <none>) normalised_patch_size=(200 200)<block_start>"""Normalizes the scale, augments, and crops the image. """<if_stmt><not>pixel_spacing[0]<eq>pixel_spacing[1]<block_start><raise>NotImplementedError("Only supports square pixels")<block_end><if_stmt>shift_center<eq>(<none> <none>)<block_start>shift_center=NRMSC_DEFAULT_SHIFT_CENTER<block_end># No augmentation: <if_stmt>augment<is><none><block_start>augment=NO_AUGMENT_PARAMS<block_end>current_shape=slices[0].shape[-2:]<line_sep>normalised_shape=tuple(int(float(d)<times>ps)<for>d,ps zip(current_shape pixel_spacing))<line_sep>max_time=max(slices[i].shape[0]<for>i xrange(len(slices)))<line_sep>final_shape=(len(slices) max_time)+output_shape<line_sep>result=np.zeros(final_shape dtype="float32")<for_stmt>i,mri_slice enumerate(slices)# For each slice, build a transformation that extracts the right patch, # and augments the data. 
# First, we scale the images such that they all have the same scale <block_start>norm_rescaling=1./pixel_spacing[0]<line_sep>tform_normscale=build_rescale_transform(norm_rescaling mri_slice[0].shape[-2:] target_shape=normalised_shape)<line_sep># Next, we shift the center of the image to the left (assumes upside_up normalisation) tform_shift_center,tform_shift_uncenter=(build_shift_center_transform(normalised_shape shift_center normalised_patch_size))<line_sep>augment_tform=build_augmentation_transform(**augment)<line_sep>patch_scale=max(float(normalised_patch_size[0])/output_shape[0] float(normalised_patch_size[1])/output_shape[1])<line_sep>tform_patch_scale=build_rescale_transform(patch_scale normalised_patch_size target_shape=output_shape)<line_sep># x and y axis transform total_tform=tform_patch_scale+tform_shift_uncenter+augment_tform+tform_shift_center+tform_normscale<line_sep># Time axis transform t_map=range(mri_slice.shape[0])<if_stmt>"roll_time"<in>augment<block_start>t_map=np.roll(t_map int(np.floor(augment["roll_time"])))<block_end><if_stmt>"flip_time"<in>augment<and>augment["flip_time"]<g>0.5<block_start>t_map=t_map[::-1]<block_end><for_stmt>j,frame enumerate(mri_slice)<block_start>j_shifted=t_map[j]<line_sep>result[i j_shifted]=fast_warp(frame total_tform output_shape=output_shape)<block_end><block_end><return>result<block_end><def_stmt>resize_and_augment images output_shape=(50 50) augment=<none><block_start><if_stmt>augment<is><none><block_start><return>resize_to_make_it_fit(images output_shape=output_shape)<block_end>max_time=max(images[i].shape[0]<for>i xrange(len(images)))<line_sep>final_shape=(len(images) max_time)+output_shape<line_sep>result=np.zeros(final_shape dtype="float32")<line_sep>volume_change=[]<line_sep>#result.reshape((final_shape[0],-1) + output_shape) <for_stmt>i,mri_slice enumerate(images)<block_start>mri_slice=mri_slice.reshape((-1 )+mri_slice.shape[-2:])<line_sep>scaling=max(1.0<times>mri_slice[0].shape[-2]/output_shape[-2] 1.0<times>mri_slice[0].shape[-1]/output_shape[-1])<line_sep>tform=build_rescale_transform(scaling mri_slice[0].shape[-2:] target_shape=output_shape)<line_sep># add rotation # add skew # add translation tform_center,tform_uncenter=build_center_uncenter_transforms(mri_slice[0].shape[-2:])<line_sep># the old positional call passed a zoom tuple into zoom_x and shifted every # following argument; it is mapped onto the current keyword signature here # (augment["translation"] is assumed to be an (x, y) pair) augment_tform=build_augmentation_transform(zoom_x=1.0 zoom_y=1.0 rotate=augment["rotation"] shear=augment["shear"] translate_x=augment["translation"][0] translate_y=augment["translation"][1] flip=<false>)<line_sep>total_tform=tform+tform_uncenter+augment_tform+tform_center<for_stmt>j,frame enumerate(mri_slice)<block_start>result[i j]=fast_warp(frame total_tform output_shape=output_shape)<block_end>A=total_tform.params[:2 :2]<line_sep>volume_change.append(np.linalg.norm(A[: 0])<times>np.linalg.norm(A[: 1]))<assert_stmt>total_tform.params[2 2]<eq>1 (total_tform.params[2 2] )<block_end>#result.reshape(final_shape) <return>result volume_change<block_end><def_stmt>resize_to_make_sunny_fit image output_shape=(50 50)<block_start>scaling=max(image.shape[-2]/output_shape[-2] image.shape[-1]/output_shape[-1])<line_sep>tform=build_rescale_transform(scaling image.shape[-2:] target_shape=output_shape)<line_sep><return>fast_warp(image tform output_shape=output_shape)<block_end><def_stmt>resize_and_augment_sunny image output_shape=(50 50) augment=<none><block_start><if_stmt>augment<is><none><block_start><return>resize_to_make_sunny_fit(image output_shape=(50 50))<block_end>final_shape=image.shape[:-2]+output_shape<line_sep>result=np.zeros(final_shape dtype="float32")<line_sep>#result.reshape((final_shape[0],-1) + output_shape) 
scaling=max(image.shape[-2]/output_shape[-2] image.shape[-1]/output_shape[-1])<line_sep>tform=build_rescale_transform(scaling image.shape[-2:] target_shape=output_shape)<line_sep># add rotation # add skew # add translation tform_center,tform_uncenter=build_center_uncenter_transforms(image.shape[-2:])<line_sep># same legacy positional call as in resize_and_augment above, mapped onto the # current keyword signature (augment["translation"] is assumed to be an (x, y) pair) augment_tform=build_augmentation_transform(zoom_x=1.0 zoom_y=1.0 rotate=augment["rotation"] shear=augment["shear"] translate_x=augment["translation"][0] translate_y=augment["translation"][1] flip=<false>)<line_sep>total_tform=tform+tform_uncenter+augment_tform+tform_center<line_sep>#result.reshape(final_shape) <return>fast_warp(image total_tform output_shape=output_shape mode='constant')<block_end><def_stmt>fast_warp img tf output_shape=(50 50) mode='constant' order=1<block_start>""" This wrapper function is faster than skimage.transform.warp """<line_sep>m=tf.params# tf._matrix is deprecated; tf.params is its replacement <return>skimage.transform._warps_cy._warp_fast(img m output_shape=output_shape mode=mode order=order)<block_end><def_stmt>build_centering_transform image_shape target_shape=(50 50)<block_start>rows,cols=image_shape<line_sep>trows,tcols=target_shape<line_sep>shift_x=(cols-tcols)/2.0<line_sep>shift_y=(rows-trows)/2.0<line_sep><return>skimage.transform.SimilarityTransform(translation=(shift_x shift_y))<block_end><def_stmt>build_rescale_transform downscale_factor image_shape target_shape<block_start>""" estimating the correct rescaling transform is slow, so just use the downscale_factor to define a transform directly. This probably isn't 100% correct, but it shouldn't matter much in practice. """<line_sep>rows,cols=image_shape<line_sep>trows,tcols=target_shape<line_sep>tform_ds=skimage.transform.AffineTransform(scale=(downscale_factor downscale_factor))<line_sep># centering shift_x=cols/(2.0<times>downscale_factor)-tcols/2.0<line_sep>shift_y=rows/(2.0<times>downscale_factor)-trows/2.0<line_sep>tform_shift_ds=skimage.transform.SimilarityTransform(translation=(shift_x shift_y))<line_sep><return>tform_shift_ds+tform_ds<block_end><def_stmt>build_center_uncenter_transforms image_shape<block_start>""" These are used to ensure that zooming and rotation happen around the center of the image. Use these transforms to center and uncenter the image around such a transform. """<line_sep>center_shift=np.array([image_shape[1] image_shape[0]])/2.0-0.5# need to swap rows and cols here apparently! confusing! tform_uncenter=skimage.transform.SimilarityTransform(translation=-center_shift)<line_sep>tform_center=skimage.transform.SimilarityTransform(translation=center_shift)<line_sep><return>tform_center tform_uncenter<block_end><def_stmt>build_shift_center_transform image_shape center_location patch_size<block_start>"""Shifts the center of the image to a given location. This function tries to include as much as possible of the image in the patch centered around the new center. If the patch around the ideal center location doesn't fit within the image, we shift the center to the right so that it does. 
"""<line_sep>center_absolute_location=[center_location[0]<times>image_shape[1] center_location[1]<times>image_shape[0]]<line_sep># Check for overlap at the edges center_absolute_location[0]=max(center_absolute_location[0] patch_size[1]/2.0)<line_sep>center_absolute_location[1]=max(center_absolute_location[1] patch_size[0]/2.0)<line_sep>center_absolute_location[0]=min(center_absolute_location[0] image_shape[1]-patch_size[1]/2.0)<line_sep>center_absolute_location[1]=min(center_absolute_location[1] image_shape[0]-patch_size[0]/2.0)<line_sep># Check for overlap at both edges <if_stmt>patch_size[0]<g>image_shape[0]<block_start>center_absolute_location[1]=image_shape[0]/2.0<block_end><if_stmt>patch_size[1]<g>image_shape[1]<block_start>center_absolute_location[0]=image_shape[1]/2.0<block_end># Build transform new_center=np.array(center_absolute_location)<line_sep>translation_center=new_center-0.5<line_sep>translation_uncenter=-np.array((patch_size[1]/2.0 patch_size[0]/2.0))-0.5<line_sep><return>(skimage.transform.SimilarityTransform(translation=translation_center) skimage.transform.SimilarityTransform(translation=translation_uncenter))<block_end><def_stmt>build_augmentation_transform zoom_x=1.0 zoom_y=1.0 skew_x=0 skew_y=0 rotate=0 shear=0 translate_x=0 translate_y=0 flip=<false> flip_vert=<false> **kwargs#print "Not performed transformations:", kwargs.keys() <block_start><if_stmt>flip<g>0.5<block_start>shear<augadd>180<line_sep>rotate<augadd>180<line_sep># shear by 180 degrees is equivalent to rotation by 180 degrees + flip. # So after that we rotate it another 180 degrees to get just the flip. <block_end><if_stmt>flip_vert<g>0.5<block_start>shear<augadd>180<block_end>tform_augment=skimage.transform.AffineTransform(scale=(1/zoom_x 1/zoom_y) rotation=np.deg2rad(rotate) shear=np.deg2rad(shear) translation=(translate_x translate_y))<line_sep>skew_x=np.deg2rad(skew_x)<line_sep>skew_y=np.deg2rad(skew_y)<line_sep>tform_skew=skimage.transform.ProjectiveTransform(matrix=np.array([[np.tan(skew_x)<times>np.tan(skew_y)+1 np.tan(skew_x) 0] [np.tan(skew_y) 1 0] [0 0 1]]))<line_sep><return>tform_skew+tform_augment<block_end>@deprecated<def_stmt>random_perturbation_transform zoom_range=[1.0 1.0] rotation_range=[0.0 0.0] skew_x_range=[0.0 0.0] skew_y_range=[0.0 0.0] shear_range=[0.0 0.0] translation_range=[0.0 0.0] do_flip=<true> allow_stretch=<false> rng=np.random<block_start>shift_x=rng.uniform(*translation_range)<line_sep>shift_y=rng.uniform(*translation_range)<line_sep>translate=(shift_x shift_y)<line_sep>rotate=rng.uniform(*rotation_range)<line_sep>shear=rng.uniform(*shear_range)<line_sep>skew_x=rng.uniform(*skew_x_range)<line_sep>skew_y=rng.uniform(*skew_y_range)<if_stmt>do_flip<block_start>flip=(rng.randint(2)<g>0)# flip half of the time <block_end><else_stmt><block_start>flip=<false><block_end># random zoom log_zoom_range=[np.log(z)<for>z zoom_range]<if_stmt>isinstance(allow_stretch float)<block_start>log_stretch_range=[-np.log(allow_stretch) np.log(allow_stretch)]<line_sep>zoom=np.exp(rng.uniform(*log_zoom_range))<line_sep>stretch_x=np.exp(rng.uniform(*log_stretch_range))<line_sep>stretch_y=np.exp(rng.uniform(*log_stretch_range))<line_sep>zoom_x=zoom<times>stretch_x<line_sep>zoom_y=zoom<times>stretch_y<block_end><elif_stmt>allow_stretch<is><true># avoid bugs, f.e. 
when it is an integer <block_start>zoom_x=np.exp(rng.uniform(*log_zoom_range))<line_sep>zoom_y=np.exp(rng.uniform(*log_zoom_range))<block_end><else_stmt><block_start>zoom_x=zoom_y=np.exp(rng.uniform(*log_zoom_range))<block_end># the range should be multiplicatively symmetric, so [1/1.1, 1.1] instead of [0.9, 1.1] makes more sense. <return>build_augmentation_transform(zoom_x=zoom_x zoom_y=zoom_y skew_x=skew_x skew_y=skew_y rotate=rotate shear=shear translate_x=translate[0] translate_y=translate[1] flip=flip)<block_end>@deprecated<def_stmt>perturb img augmentation_params target_shape=(50 50) rng=np.random# # DEBUG: draw a border to see where the image ends up # img[0, :] = 0.5 # img[-1, :] = 0.5 # img[:, 0] = 0.5 # img[:, -1] = 0.5 <block_start>tform_centering=build_centering_transform(img.shape target_shape)<line_sep>tform_center,tform_uncenter=build_center_uncenter_transforms(img.shape)<line_sep>tform_augment=random_perturbation_transform(rng=rng **augmentation_params)<line_sep>tform_augment=tform_uncenter+tform_augment+tform_center# shift to center, augment, shift back (for the rotation/shearing) <return>fast_warp(img tform_centering+tform_augment output_shape=target_shape mode='constant').astype('float32')<block_end>## RESCALING @deprecated<def_stmt>perturb_rescaled img scale augmentation_params target_shape=(50 50) rng=np.random<block_start>""" scale is a DOWNSCALING factor. """<line_sep>tform_rescale=build_rescale_transform(scale img.shape target_shape)# also does centering tform_center,tform_uncenter=build_center_uncenter_transforms(img.shape)<line_sep>tform_augment=random_perturbation_transform(rng=rng **augmentation_params)<line_sep>tform_augment=tform_uncenter+tform_augment+tform_center# shift to center, augment, shift back (for the rotation/shearing) <return>fast_warp(img tform_rescale+tform_augment output_shape=target_shape mode='constant').astype('float32')<block_end># for test-time augmentation @deprecated<def_stmt>perturb_rescaled_fixed img scale tform_augment target_shape=(50 50)<block_start>""" scale is a DOWNSCALING factor. """<line_sep>tform_rescale=build_rescale_transform(scale img.shape target_shape)# also does centering tform_center,tform_uncenter=build_center_uncenter_transforms(img.shape)<line_sep>tform_augment=tform_uncenter+tform_augment+tform_center# shift to center, augment, shift back (for the rotation/shearing) <return>fast_warp(img tform_rescale+tform_augment output_shape=target_shape mode='constant').astype('float32')<block_end>
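# --------------------------------------------------------------------------- # Minimal usage sketch (added for illustration; not part of the original # library): downscale a single 100x120 float32 frame into a 50x50 patch with # build_rescale_transform + fast_warp. # --------------------------------------------------------------------------- <def_stmt>_example_rescale_usage <block_start>img=np.random.rand(100 120).astype('float32')<line_sep># downscale factor chosen so the larger side still fits the 50x50 patch scaling=max(100/50. 120/50.)<line_sep>tform=build_rescale_transform(scaling img.shape target_shape=(50 50))<line_sep>patch=fast_warp(img tform output_shape=(50 50))<line_sep><assert_stmt>patch.shape<eq>(50 50)<line_sep><return>patch<block_end>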
# --------------------------------------------------------------------------- # # test_doctests.py # # # # Copyright © 2015-2021, <NAME>, original author. # # # # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # # --------------------------------------------------------------------------- # <import_stmt>doctest<import_stmt>importlib<import_stmt>os<import_stmt>pathlib<import_stmt>unittest<import_from_stmt>tests.base TestCase# type: ignore <class_stmt>DoctestTests(TestCase)# pragma: no cover <block_start>@staticmethod<def_stmt>_modules <block_start>test_dir=pathlib.Path(__file__).parent<line_sep>root_dir=test_dir.parent<line_sep>source_dir=root_dir/'pottery'<line_sep>source_files=source_dir.glob('**/*.py')<for_stmt>source_file source_files<block_start>relative_path=source_file.relative_to(root_dir)<line_sep>parts=list(relative_path.parts)<line_sep>parts[-1]=source_file.stem<line_sep>module_name='.'.join(parts)<line_sep>module=importlib.import_module(module_name)<line_sep><yield>module<block_end><block_end>@unittest.skipUnless('TEST_DOCTESTS'<in>os.environ 'our doctests run too slowly' )<def_stmt>test_doctests self<block_start>'Run doctests and confirm that they work and are not science fiction'<for_stmt>module self._modules()<block_start><with_stmt>self.subTest(module=module)<block_start>results=doctest.testmod(m=module)<assert_stmt><not>results.failed<block_end><block_end><block_end><block_end>
# -*- coding: utf-8 -*- """ Microsoft-Windows-DCLocator GUID : cfaa5446-c6c4-4f5c-866f-31c9b55b962d """<import_from_stmt>construct Int8sl Int8ul Int16ul Int16sl Int32sl Int32ul Int64sl Int64ul Bytes Double Float32l Struct<import_from_stmt>etl.utils WString CString SystemTime Guid<import_from_stmt>etl.dtyp Sid<import_from_stmt>etl.parsers.etw.core Etw declare guid<line_sep>@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d") event_id=1 version=0)<class_stmt>Microsoft_Windows_DCLocator_1_0(Etw)<block_start>pattern=Struct("Message"/WString)<block_end>@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d") event_id=2 version=0)<class_stmt>Microsoft_Windows_DCLocator_2_0(Etw)<block_start>pattern=Struct("Message"/WString)<block_end>@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d") event_id=3 version=0)<class_stmt>Microsoft_Windows_DCLocator_3_0(Etw)<block_start>pattern=Struct("Message"/WString)<block_end>@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d") event_id=4 version=0)<class_stmt>Microsoft_Windows_DCLocator_4_0(Etw)<block_start>pattern=Struct("Message"/WString)<block_end>@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d") event_id=5 version=0)<class_stmt>Microsoft_Windows_DCLocator_5_0(Etw)<block_start>pattern=Struct("Message"/WString)<block_end>@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d") event_id=6 version=0)<class_stmt>Microsoft_Windows_DCLocator_6_0(Etw)<block_start>pattern=Struct("Message"/WString)<block_end>@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d") event_id=7 version=0)<class_stmt>Microsoft_Windows_DCLocator_7_0(Etw)<block_start>pattern=Struct("Message"/WString)<block_end>@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d") event_id=8 version=0)<class_stmt>Microsoft_Windows_DCLocator_8_0(Etw)<block_start>pattern=Struct("Message"/WString)<block_end>@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d") event_id=9 version=0)<class_stmt>Microsoft_Windows_DCLocator_9_0(Etw)<block_start>pattern=Struct("Message"/WString)<block_end>
# Copyright Amazon.com, Inc. or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # <import_stmt>inspect<import_from_stmt>typing TYPE_CHECKING Any Dict<import_from_stmt>aft_common aft_utils<as>utils<import_from_stmt>aft_common notifications<import_from_stmt>aft_common.customizations get_running_pipeline_count list_pipelines<import_from_stmt>boto3.session Session<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>aws_lambda_powertools.utilities.typing LambdaContext<block_end><else_stmt><block_start>LambdaContext=object<block_end>logger=utils.get_logger()<def_stmt>lambda_handler event:Dict[str Any] context:LambdaContext<arrow>Dict[str int]<block_start>session=Session()<try_stmt><block_start>pipelines=list_pipelines(session)<line_sep>running_pipelines=get_running_pipeline_count(session pipelines)<line_sep><return>{"running_pipelines":running_pipelines}<block_end><except_stmt>Exception<as>error<block_start>notifications.send_lambda_failure_sns_message(session=session message=str(error) context=context subject="Failed to list all AFT account customization pipelines" )<line_sep>message={"FILE":__file__.split("/")[-1] "METHOD":inspect.stack()[0][3] "EXCEPTION":str(error) }<line_sep>logger.exception(message)<line_sep><raise><block_end><block_end>
__package__='archivebox.core'<import_from_stmt>django.utils timezone<import_from_stmt>..config PUBLIC_SNAPSHOTS<def_stmt>detect_timezone request activate:bool=<true><block_start>gmt_offset=(request.COOKIES.get('GMT_OFFSET')<or>'').strip()<line_sep>tz=<none><if_stmt>gmt_offset.replace('-' '').isdigit()<block_start>tz=timezone.get_fixed_timezone(int(gmt_offset))<if_stmt>activate<block_start>timezone.activate(tz)<block_end><block_end># print('GMT_OFFSET', gmt_offset, tz) <return>tz<block_end><def_stmt>TimezoneMiddleware get_response<block_start><def_stmt>middleware request<block_start>detect_timezone(request activate=<true>)<line_sep><return>get_response(request)<block_end><return>middleware<block_end><def_stmt>CacheControlMiddleware get_response<block_start><def_stmt>middleware request<block_start>response=get_response(request)<if_stmt>'/archive/'<in>request.path<or>'/static/'<in>request.path<block_start>policy='public'<if>PUBLIC_SNAPSHOTS<else>'private'<line_sep>response['Cache-Control']=f'{policy}, max-age=60, stale-while-revalidate=300'<line_sep># print('Set Cache-Control header to', response['Cache-Control']) <block_end><return>response<block_end><return>middleware<block_end>
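# Wiring sketch (added for illustration): both factory-style middlewares above # are meant to be listed in Django's MIDDLEWARE setting; the dotted paths below # are hypothetical and depend on where this module actually lives. _EXAMPLE_MIDDLEWARE=['archivebox.core.middleware.TimezoneMiddleware' 'archivebox.core.middleware.CacheControlMiddleware']<line_sep>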
<import_from_stmt>django.utils.functional wraps<def_stmt>frame_deny_exempt view<block_start>@wraps(view)<def_stmt>inner *args **kwargs<block_start>response=view(*args **kwargs)<line_sep>response._frame_deny_exempt=<true><line_sep><return>response<block_end><return>inner<block_end>
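# Usage sketch (added for illustration; the view below is hypothetical): the # decorator flags a single response so an X-Frame-Options middleware can skip it. <import_from_stmt>django.http HttpResponse<line_sep>@frame_deny_exempt<def_stmt>embeddable_view request<block_start><return>HttpResponse("this response may be framed")<block_end>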
<import_from_stmt>typing List<line_sep><class_stmt>Solution<block_start><def_stmt>maxSumTwoNoOverlap self A:List[int] L:int M:int<arrow>int<block_start># prefix[i] holds sum(A[:i]); res tracks the best combined sum so far prefix,n,res,left=[0<for>_ range(len(A)+1)] len(A)+1 0 0<for_stmt>i range(1 n)<block_start>prefix[i]=prefix[i-1]+A[i-1]<block_end># pass 1: best L-length sum ending before an M-length window ending at i <for_stmt>i range(L+M n)<block_start>left=max(left prefix[i-M]-prefix[i-M-L])<line_sep>res=max(res left+prefix[i]-prefix[i-M])<block_end>left=0<line_sep># pass 2: same scan with the M-length window placed first <for_stmt>i range(L+M n)<block_start>left=max(left prefix[i-L]-prefix[i-M-L])<line_sep>res=max(res left+prefix[i]-prefix[i-L])<block_end><return>res<block_end><block_end>
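# Worked example (added for illustration): the LeetCode 1031 sample input, # where the best non-overlapping pair is [9] (L=1) and [6,5] (M=2), total 20. <if_stmt>__name__<eq>'__main__'<block_start># expected output: 20 print(Solution().maxSumTwoNoOverlap([0 6 5 2 2 5 1 9 4] 1 2))<block_end>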
<import_from_stmt>tkinter Tk Label Button<import_from_stmt>tkinter *<import_stmt>sys<import_stmt>time<import_stmt>os<import_stmt>RPi.GPIO<as>GPIO<line_sep>GPIO.setwarnings(<false>)<line_sep>GPIO.cleanup()<line_sep>GPIO.setmode(GPIO.BCM)<line_sep>GPIO.setup(21 GPIO.IN pull_up_down=GPIO.PUD_DOWN)<line_sep>top=Tk()<line_sep>top.minsize(666 666)<line_sep>top.maxsize(666 666)<line_sep>###################################################################### <class_stmt>App<block_start><def_stmt>__init__ self master#################################################################### <block_start>self.button=Button(top text='START' command=self.convert0)<line_sep>self.button.place(x=50 y=50)<line_sep>self.label=Label(top text='').grid(row=20 column=5)<line_sep>self.clock=Label(top font=('times' 20 'bold') bg='green')<line_sep>self.clock.place(x=200 y=200)<line_sep>self.isRunning=<false><line_sep>GPIO.add_event_detect(21 GPIO.BOTH callback=self.callback)<line_sep>################################################################### <block_end><def_stmt>convert0 self tog=[0]<block_start>tog[0]=<not>tog[0]<if_stmt>tog[0]######################################### # branches swapped so the first press shows the running (STOP/ON) state <block_start>self.button.config(text='STOP')<line_sep>self.button.configure(bg="red")<line_sep>self.button.configure(fg="white")<line_sep>self.label=Label(top text='OFF' bg="red" fg="red").place(x=150 y=55)<line_sep>self.label=Label(top text='ON' bg="red" fg="white").place(x=150 y=55)<line_sep>######################################### <block_end><else_stmt><block_start>self.button.config(text='START')<line_sep>self.button.configure(bg="blue")<line_sep>self.button.configure(fg="white")<line_sep>self.label=Label(top text='OFF' bg="blue" fg="white").place(x=150 y=55)<block_end><block_end>######################################### <def_stmt>tick self# get the current local time from the PC <block_start>time1=time.strftime('%I:%M:%S')<line_sep># if time string has changed, update it self.clock.config(text=time1)<line_sep># calls itself every 200 milliseconds # to update the time display as needed # could use >200 ms, but display gets jerky <if_stmt>self.isRunning<block_start>self.clock.after(200 self.tick)<block_end><block_end>################################################################### <def_stmt>start self<block_start>self.isRunning=<true><line_sep>self.clock.after(200 self.tick)<block_end><def_stmt>stop self<block_start>self.isRunning=<false><block_end><def_stmt>callback self channel<block_start><if_stmt>self.isRunning<block_start>self.stop()<block_end><else_stmt><block_start>self.start()<block_end><block_end><block_end>app=App(top)<line_sep>top.mainloop()<line_sep>
<import_from_stmt>tensortrade.feed Stream DataFeed<def_stmt>test_generic <block_start>s1=Stream.source(["hello" "my" "name" "is"] dtype="string")<line_sep>s2=Stream.source([1 2 3 4 5 6])<line_sep>g1=s1.apply(<lambda>x:x[0]).rename("g1")<line_sep>g2=s2.lag().rename("g2")<line_sep>feed=DataFeed([g1 g2])<line_sep>feed.compile()<line_sep>feed.next()<assert_stmt>feed.next()<eq>{"g1":"m" "g2":1}<block_end>
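# Added continuation sketch (not in the original test): it assumes lag() emits # the previous value of its source stream, which is consistent with the # assertion above ({"g1":"m" "g2":1} on the second step). <def_stmt>test_generic_lag_next_step <block_start>s1=Stream.source(["hello" "my" "name" "is"] dtype="string")<line_sep>s2=Stream.source([1 2 3 4 5 6])<line_sep>g1=s1.apply(<lambda>x:x[0]).rename("g1")<line_sep>g2=s2.lag().rename("g2")<line_sep>feed=DataFeed([g1 g2])<line_sep>feed.compile()<line_sep>feed.next()<line_sep>feed.next()<line_sep><assert_stmt>feed.next()<eq>{"g1":"n" "g2":2}<block_end>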
# -------------------------------------------------------- # OpenVQA # Written by <NAME> https://github.com/ParadoxZW # based on the implementation in https://github.com/hengyuan-hu/bottom-up-attention-vqa # ELU is chosen as the activation function in non-linear layers due to # the experiment results that indicate ELU is better than ReLU in BUTD model. # -------------------------------------------------------- <import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch.nn.utils.weight_norm weight_norm<import_stmt>torch<import_stmt>math<line_sep># ------------------------------ # ----- Weight Normal MLP ------ # ------------------------------ <class_stmt>MLP(nn.Module)<block_start>""" class for non-linear fully connect network """<def_stmt>__init__ self dims act='ELU' dropout_r=0.0<block_start>super(MLP self).__init__()<line_sep>layers=[]<for_stmt>i range(len(dims)-1)<block_start>in_dim=dims[i]<line_sep>out_dim=dims[i+1]<if_stmt>dropout_r<g>0<block_start>layers.append(nn.Dropout(dropout_r))<block_end>layers.append(weight_norm(nn.Linear(in_dim out_dim) dim=<none>))<if_stmt>act<ne>''<block_start>layers.append(getattr(nn act)())<block_end><block_end>self.mlp=nn.Sequential(*layers)<block_end><def_stmt>forward self x<block_start><return>self.mlp(x)<block_end><block_end># ------------------------------ # ---Top Down Attention Map ---- # ------------------------------ <class_stmt>AttnMap(nn.Module)<block_start>''' implementation of top down attention '''<def_stmt>__init__ self __C<block_start>super(AttnMap self).__init__()<line_sep>self.__C=__C<line_sep>self.linear_q=weight_norm(nn.Linear(__C.HIDDEN_SIZE __C.HIDDEN_SIZE) dim=<none>)<line_sep>self.linear_v=weight_norm(nn.Linear(__C.IMG_FEAT_SIZE __C.IMG_FEAT_SIZE) dim=<none>)<line_sep>self.nonlinear=MLP([__C.IMG_FEAT_SIZE+__C.HIDDEN_SIZE __C.HIDDEN_SIZE] dropout_r=__C.DROPOUT_R)<line_sep>self.linear=weight_norm(nn.Linear(__C.HIDDEN_SIZE 1) dim=<none>)<block_end><def_stmt>forward self q v<block_start>v=self.linear_v(v)<line_sep>q=self.linear_q(q)<line_sep>logits=self.logits(q v)<line_sep>w=nn.functional.softmax(logits 1)<line_sep><return>w<block_end><def_stmt>logits self q v<block_start>num_objs=v.size(1)<line_sep>q=q.unsqueeze(1).repeat(1 num_objs 1)<line_sep>vq=torch.cat((v q) 2)<line_sep>joint_repr=self.nonlinear(vq)<line_sep>logits=self.linear(joint_repr)<line_sep><return>logits<block_end><block_end># ------------------------------ # ---- Attended Joint Map ------ # ------------------------------ <class_stmt>TDA(nn.Module)<block_start><def_stmt>__init__ self __C<block_start>super(TDA self).__init__()<line_sep>self.__C=__C<line_sep>self.v_att=AttnMap(__C)<line_sep>self.q_net=MLP([__C.HIDDEN_SIZE __C.HIDDEN_SIZE])<line_sep>self.v_net=MLP([__C.IMG_FEAT_SIZE __C.HIDDEN_SIZE])<block_end><def_stmt>forward self q v<block_start>att=self.v_att(q v)<line_sep>atted_v=(att<times>v).sum(1)<line_sep>q_repr=self.q_net(q)<line_sep>v_repr=self.v_net(atted_v)<line_sep>joint_repr=q_repr<times>v_repr<line_sep><return>joint_repr<block_end><block_end>
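# ------------------------------ # Shape-check sketch (added for illustration): _FakeConfig is a hypothetical # stand-in that only carries the attributes the layers above actually read. # ------------------------------ <class_stmt>_FakeConfig<block_start>HIDDEN_SIZE=512<line_sep>IMG_FEAT_SIZE=2048<line_sep>DROPOUT_R=0.2<block_end><def_stmt>_example_tda_shapes <block_start>model=TDA(_FakeConfig())<line_sep># q: (batch, hidden) question features; v: (batch, num_objects, img_feat) q=torch.randn(8 512)<line_sep>v=torch.randn(8 36 2048)<line_sep><assert_stmt>model(q v).shape<eq>(8 512)<block_end>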
<import_stmt>py<import_stmt>sys<import_from_stmt>rpython.jit.metainterp.history ConstInt INT FLOAT<import_from_stmt>rpython.jit.metainterp.history BasicFailDescr TargetToken<import_from_stmt>rpython.jit.metainterp.resoperation rop<import_from_stmt>rpython.jit.metainterp.resoperation InputArgInt InputArgRef InputArgFloat<import_from_stmt>rpython.jit.backend.detect_cpu getcpuclass<import_from_stmt>rpython.jit.backend.llsupport.regalloc FrameManager LinkedList<import_from_stmt>rpython.jit.backend.llsupport.regalloc RegisterManager<as>BaseRegMan Lifetime<as>RealLifetime UNDEF_POS BaseRegalloc compute_vars_longevity LifetimeManager<import_from_stmt>rpython.jit.tool.oparser parse<import_from_stmt>rpython.jit.codewriter.effectinfo EffectInfo<import_from_stmt>rpython.rtyper.lltypesystem lltype<import_from_stmt>rpython.rtyper.annlowlevel llhelper<def_stmt>newboxes *values<block_start><return>[InputArgInt(v)<for>v values]<block_end><def_stmt>newrefboxes count<block_start><return>[InputArgRef()<for>_ range(count)]<block_end><def_stmt>Lifetime definition_pos=UNDEF_POS last_usage=UNDEF_POS real_usages=UNDEF_POS<block_start><if_stmt>real_usages<eq>UNDEF_POS<block_start>real_usages=last_usage<block_end>lifetime=RealLifetime(definition_pos last_usage)<if_stmt>isinstance(real_usages int)<block_start>real_usages=[real_usages]<block_end>lifetime.real_usages=real_usages<line_sep><return>lifetime<block_end><def_stmt>boxes_and_longevity num<block_start>res=[]<line_sep>longevity={}<for_stmt>i range(num)<block_start>box=InputArgInt(0)<line_sep>res.append(box)<line_sep>longevity[box]=Lifetime(0 1)<block_end><return>res longevity<block_end><class_stmt>FakeReg(object)<block_start><def_stmt>__init__ self i<block_start>self.n=i<block_end><def_stmt>_getregkey self<block_start><return>self.n<block_end><def_stmt>is_memory_reference self<block_start><return><false><block_end><def_stmt>__repr__ self<block_start><return>'r%d'%self.n<block_end><block_end>r0,r1,r2,r3=[FakeReg(i)<for>i range(4)]<line_sep>r4,r5,r6,r7,r8,r9=[FakeReg(i)<for>i range(4 10)]<line_sep>regs=[r0 r1 r2 r3]<class_stmt>RegisterManager(BaseRegMan)<block_start>all_regs=regs<def_stmt>__init__ self longevity frame_manager=<none> assembler=<none><block_start><if_stmt>isinstance(longevity dict)<block_start>longevity=LifetimeManager(longevity)<block_end>BaseRegMan.__init__(self longevity frame_manager assembler)<block_end><def_stmt>convert_to_imm self v<block_start><return>v<block_end><block_end><class_stmt>FakeFramePos(object)<block_start><def_stmt>__init__ self pos box_type<block_start>self.pos=pos<line_sep>self.value=pos<line_sep>self.box_type=box_type<block_end><def_stmt>_getregkey self<block_start><return>~self.value<block_end><def_stmt>is_memory_reference self<block_start><return><true><block_end><def_stmt>__repr__ self<block_start><return>'FramePos<%d,%s>'%(self.pos self.box_type)<block_end><def_stmt>__eq__ self other<block_start><return>self.pos<eq>other.pos<and>self.box_type<eq>other.box_type<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end><class_stmt>TFrameManagerEqual(FrameManager)<block_start><def_stmt>frame_pos self i box_type<block_start><return>FakeFramePos(i box_type)<block_end><def_stmt>frame_size self box_type<block_start><return>1<block_end><def_stmt>get_loc_index self loc<block_start><assert_stmt>isinstance(loc FakeFramePos)<line_sep><return>loc.pos<block_end><block_end><class_stmt>TFrameManager(FrameManager)<block_start><def_stmt>frame_pos self i box_type<block_start><return>FakeFramePos(i 
box_type)<block_end><def_stmt>frame_size self box_type<block_start><if_stmt>box_type<eq>FLOAT<block_start><return>2<block_end><else_stmt><block_start><return>1<block_end><block_end><def_stmt>get_loc_index self loc<block_start><assert_stmt>isinstance(loc FakeFramePos)<line_sep><return>loc.pos<block_end><block_end><class_stmt>FakeCPU(object)<block_start><def_stmt>get_baseofs_of_frame_field self<block_start><return>0<block_end><block_end><class_stmt>MockAsm(object)<block_start><def_stmt>__init__ self<block_start>self.moves=[]<line_sep>self.emitted=[]<line_sep>self.cpu=FakeCPU()<line_sep># XXX register allocation statistics to be removed later self.num_moves_calls=0<line_sep>self.num_moves_jump=0<line_sep>self.num_spills=0<line_sep>self.num_spills_to_existing=0<line_sep>self.num_reloads=0<line_sep>self.preamble_num_moves_calls=0<line_sep>self.preamble_num_moves_jump=0<line_sep>self.preamble_num_spills=0<line_sep>self.preamble_num_spills_to_existing=0<line_sep>self.preamble_num_reloads=0<block_end><def_stmt>regalloc_mov self from_loc to_loc<block_start>self.moves.append((from_loc to_loc))<line_sep>self.emitted.append(("move" to_loc from_loc))<block_end><block_end><def_stmt>test_lifetime_next_real_usage <block_start>lt=RealLifetime(0 1000)<line_sep>lt.real_usages=[0 1 5 10 24 35 55 56 57 90 92 100]<for_stmt>i range(100)<block_start>next=lt.next_real_usage(i)<assert_stmt>next<in>lt.real_usages<assert_stmt>next<g>i<assert_stmt>lt.real_usages[lt.real_usages.index(next)-1]<le>i<block_end><assert_stmt>lt.next_real_usage(100)<eq>-1<assert_stmt>lt.next_real_usage(101)<eq>-1<block_end><def_stmt>test_fixed_position <block_start>b0,b1,b2=newboxes(0 0 0)<line_sep>l0=Lifetime(0 5)<line_sep>l1=Lifetime(2 9)<line_sep>l2=Lifetime(0 9)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1 b2:l2})<line_sep>longevity.fixed_register(1 r0 b0)<line_sep>longevity.fixed_register(4 r2 b0)<line_sep>longevity.fixed_register(5 r1 b1)<line_sep>longevity.fixed_register(8 r1 b1)<assert_stmt>l0.fixed_positions<eq>[(1 r0) (4 r2)]<assert_stmt>l1.fixed_positions<eq>[(5 r1) (8 r1)]<assert_stmt>l2.fixed_positions<is><none><line_sep>fpr0=longevity.fixed_register_use[r0]<line_sep>fpr1=longevity.fixed_register_use[r1]<line_sep>fpr2=longevity.fixed_register_use[r2]<assert_stmt>r3<not><in>longevity.fixed_register_use<assert_stmt>fpr0.index_lifetimes<eq>[(1 0)]<assert_stmt>fpr1.index_lifetimes<eq>[(5 2) (8 5)]<assert_stmt>fpr2.index_lifetimes<eq>[(4 1)]<block_end><def_stmt>test_fixed_position_none <block_start>b0,b1,b2=newboxes(0 0 0)<line_sep>l0=Lifetime(0 5)<line_sep>l1=Lifetime(2 9)<line_sep>l2=Lifetime(0 9)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1 b2:l2})<line_sep>longevity.fixed_register(1 r0)<line_sep>longevity.fixed_register(4 r2)<line_sep>longevity.fixed_register(5 r1)<line_sep>longevity.fixed_register(8 r1)<line_sep>fpr0=longevity.fixed_register_use[r0]<line_sep>fpr1=longevity.fixed_register_use[r1]<line_sep>fpr2=longevity.fixed_register_use[r2]<assert_stmt>r3<not><in>longevity.fixed_register_use<assert_stmt>fpr0.index_lifetimes<eq>[(1 1)]<assert_stmt>fpr1.index_lifetimes<eq>[(5 5) (8 8)]<assert_stmt>fpr2.index_lifetimes<eq>[(4 4)]<block_end><def_stmt>test_free_until_pos_none <block_start>longevity=LifetimeManager({})<line_sep>longevity.fixed_register(5 r1 <none>)<line_sep>longevity.fixed_register(8 r1 <none>)<line_sep>longevity.fixed_register(35 r1 
<none>)<line_sep>fpr1=longevity.fixed_register_use[r1]<assert_stmt>fpr1.free_until_pos(0)<eq>5<assert_stmt>fpr1.free_until_pos(1)<eq>5<assert_stmt>fpr1.free_until_pos(2)<eq>5<assert_stmt>fpr1.free_until_pos(3)<eq>5<assert_stmt>fpr1.free_until_pos(4)<eq>5<assert_stmt>fpr1.free_until_pos(5)<eq>5<assert_stmt>fpr1.free_until_pos(10)<eq>35<assert_stmt>fpr1.free_until_pos(20)<eq>35<assert_stmt>fpr1.free_until_pos(30)<eq>35<assert_stmt>fpr1.free_until_pos(36)<eq>sys.maxint<block_end><def_stmt>test_free_until_pos <block_start>b0,b1,b2=newboxes(0 0 0)<line_sep>l0=Lifetime(0 5)<line_sep>l1=Lifetime(2 9)<line_sep>l2=Lifetime(30 40)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1 b2:l2})<line_sep>longevity.fixed_register(5 r1 b1)<line_sep>longevity.fixed_register(8 r1 b1)<line_sep>longevity.fixed_register(35 r1 b2)<line_sep>fpr1=longevity.fixed_register_use[r1]<line_sep># simple cases: we are before the beginning of the lifetime of the variable # in the fixed register, then it's free until the definition of the # variable <assert_stmt>fpr1.free_until_pos(0)<eq>2<assert_stmt>fpr1.free_until_pos(1)<eq>2<assert_stmt>fpr1.free_until_pos(2)<eq>2<assert_stmt>fpr1.free_until_pos(10)<eq>30<assert_stmt>fpr1.free_until_pos(20)<eq>30<assert_stmt>fpr1.free_until_pos(30)<eq>30<line_sep># after the fixed use, we are fine anyway <assert_stmt>fpr1.free_until_pos(36)<eq>sys.maxint<assert_stmt>fpr1.free_until_pos(50)<eq>sys.maxint<line_sep># asking for a position *after* the definition of the variable in the fixed # register means the variable didn't make it into the fixed register, but # at the latest by the use point it will have to go there <assert_stmt>fpr1.free_until_pos(3)<eq>5<assert_stmt>fpr1.free_until_pos(4)<eq>5<assert_stmt>fpr1.free_until_pos(5)<eq>5<assert_stmt>fpr1.free_until_pos(6)<eq>8<assert_stmt>fpr1.free_until_pos(7)<eq>8<assert_stmt>fpr1.free_until_pos(8)<eq>8<assert_stmt>fpr1.free_until_pos(31)<eq>35<assert_stmt>fpr1.free_until_pos(32)<eq>35<assert_stmt>fpr1.free_until_pos(33)<eq>35<assert_stmt>fpr1.free_until_pos(34)<eq>35<assert_stmt>fpr1.free_until_pos(35)<eq>35<block_end><def_stmt>test_free_until_pos_different_regs <block_start>b0,b1,b2=newboxes(0 0 0)<line_sep>l0=Lifetime(0 5)<line_sep>l1=Lifetime(2 9)<line_sep>l2=Lifetime(30 40)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1 b2:l2})<line_sep>longevity.fixed_register(1 r0 b0)<line_sep>longevity.fixed_register(4 r2 b0)<line_sep>fpr2=longevity.fixed_register_use[r2]<line_sep># the definition of b0 is before the other fixed register use of r0, so the # earliest b0 can be in r2 is that use point at index 1 <assert_stmt>fpr2.free_until_pos(0)<eq>1<block_end><def_stmt>test_longest_free_reg <block_start>b0,b1,b2=newboxes(0 0 0)<line_sep>l0=Lifetime(0 5)<line_sep>l1=Lifetime(2 9)<line_sep>l2=Lifetime(30 40)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1 b2:l2})<line_sep>longevity.fixed_register(1 r0 b0)<line_sep>longevity.fixed_register(4 r2 b0)<line_sep>longevity.fixed_register(5 r1 b1)<line_sep>longevity.fixed_register(8 r1 b1)<line_sep>longevity.fixed_register(35 r1 b2)<assert_stmt>longevity.longest_free_reg(0 [r0 r1 r2])<eq>(r1 2)<block_end><def_stmt>test_try_pick_free_reg <block_start>b0,b1,b2,b3,b4=newboxes(0 0 0 0 0)<line_sep>l0=Lifetime(0 4)<line_sep>l1=Lifetime(2 20)<line_sep>l2=Lifetime(6 20)<line_sep>l3=Lifetime(8 20)<line_sep>l4=Lifetime(0 10)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1 b2:l2 b3:l3 b4:l4})<line_sep>longevity.fixed_register(3 r1 b1)<line_sep>longevity.fixed_register(7 r2 b2)<line_sep>longevity.fixed_register(9 r3 
b3)<line_sep># a best fit loc=longevity.try_pick_free_reg(0 b0 [r1 r2 r3 r4 r5])<assert_stmt>loc<is>r2<line_sep># does not fit into any of the fixed regs, use a non-fixed one loc=longevity.try_pick_free_reg(0 b4 [r5 r2 r3 r4 r1])<assert_stmt>loc<in>[r4 r5]<line_sep># all available are fixed but var doesn't fit completely into any of these. # pick the biggest interval loc=longevity.try_pick_free_reg(0 b4 [r1 r2 r3])<assert_stmt>loc<is>r3<block_end><def_stmt>test_try_pick_free_reg_bug <block_start>b0,b1,b2,b3,b4=newboxes(0 0 0 0 0)<line_sep>l0=Lifetime(10 30)<line_sep>l1=Lifetime(0 15)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1})<line_sep>longevity.fixed_register(20 r0 b0)<line_sep># does not fit into r0, use r1 loc=longevity.try_pick_free_reg(0 b1 [r0 r1])<assert_stmt>loc<eq>r1<block_end><def_stmt>test_try_pick_free_reg_bug2 <block_start>b0,b1,b2,b3,b4=newboxes(0 0 0 0 0)<line_sep>l0=Lifetime(1 2)<line_sep>l1=Lifetime(2 4)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1})<line_sep>longevity.fixed_register(4 r1 b1)<line_sep># does not fit into r0, use r1 loc=longevity.try_pick_free_reg(0 b0 [r0 r1])<assert_stmt>loc<eq>r0<block_end><def_stmt>test_simple_coalescing <block_start>b0,b1,b2,b3,b4=newboxes(0 0 0 0 0)<line_sep>l0=Lifetime(0 4)<line_sep>l1=Lifetime(4 20)<line_sep>l2=Lifetime(4 20)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1 b2:l2})<line_sep>longevity.fixed_register(10 r1 b1)<line_sep>longevity.fixed_register(10 r2 b2)<line_sep>longevity.try_use_same_register(b0 b2)<line_sep>loc=longevity.try_pick_free_reg(0 b0 [r0 r1 r2 r3 r4])<assert_stmt>loc<is>r2<block_end><def_stmt>test_coalescing_blocks_regs_correctly <block_start>b0,b1,b2,b3,b4=newboxes(0 0 0 0 0)<line_sep>l0=Lifetime(10 30)<line_sep>l1=Lifetime(30 40)<line_sep>l2=Lifetime(30 40)<line_sep>l3=Lifetime(0 15)<line_sep>l4=Lifetime(0 5)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1 b2:l2 b3:l3 b4:l4})<line_sep>longevity.try_use_same_register(b0 b1)<line_sep>longevity.fixed_register(35 r1 b1)<line_sep>longevity.fixed_register(35 r2 b2)<line_sep>loc=longevity.try_pick_free_reg(0 b3 [r1 r2])<line_sep># r2 is picked, otherwise b0 can't end up in r1 <assert_stmt>loc<is>r2<line_sep>loc=longevity.try_pick_free_reg(0 b4 [r1 r2])<line_sep># r1 is picked, because b4 fits before b0 <assert_stmt>loc<is>r1<block_end><def_stmt>test_coalescing_non_fixed_regs <block_start>b0,b1,b2,b3,b4=newboxes(0 0 0 0 0)<line_sep>l0=Lifetime(0 10)<line_sep>l1=Lifetime(10 20)<line_sep>l2=Lifetime(25 40)<line_sep>l3=Lifetime(15 40)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1 b2:l2 b3:l3})<line_sep>longevity.try_use_same_register(b0 b1)<line_sep>longevity.fixed_register(35 r2 b2)<line_sep>longevity.fixed_register(35 r3 b3)<line_sep>loc=longevity.try_pick_free_reg(0 b0 [r1 r2 r3])<line_sep># r2 is picked, otherwise b1 can't end up in the same reg as b0 <assert_stmt>loc<is>r2<block_end><def_stmt>test_chained_coalescing # 5 + b4 # | # 10 + b0 | # | | # | 15 + # | # + # 20 # + b1 # | # | # | # + # 30 # + b2 # | # r1 * # | # + # 40 <block_start>b0,b1,b2,b3,b4=newboxes(0 0 0 0 0)<line_sep>l0=Lifetime(10 20)<line_sep>l1=Lifetime(20 30)<line_sep>l2=Lifetime(30 40)<line_sep>l4=Lifetime(5 15)<line_sep>longevity=LifetimeManager({b0:l0 b1:l1 b2:l2 b4:l4})<line_sep>longevity.try_use_same_register(b0 b1)<line_sep>longevity.try_use_same_register(b1 b2)<line_sep>longevity.fixed_register(35 r1 b2)<line_sep>loc=longevity.try_pick_free_reg(5 b4 [r0 r1])<assert_stmt>loc<is>r0<block_end><class_stmt>TestRegalloc(object)<block_start><def_stmt>test_freeing_vars 
self<block_start>b0,b1,b2=newboxes(0 0 0)<line_sep>longevity={b0:Lifetime(0 1) b1:Lifetime(0 2) b2:Lifetime(0 2)}<line_sep>rm=RegisterManager(longevity)<line_sep>rm.next_instruction()<for_stmt>b b0 b1 b2<block_start>rm.try_allocate_reg(b)<block_end>rm._check_invariants()<assert_stmt>len(rm.free_regs)<eq>1<assert_stmt>len(rm.reg_bindings)<eq>3<line_sep>rm.possibly_free_vars([b0 b1 b2])<assert_stmt>len(rm.free_regs)<eq>1<assert_stmt>len(rm.reg_bindings)<eq>3<line_sep>rm._check_invariants()<line_sep>rm.next_instruction()<line_sep>rm.possibly_free_vars([b0 b1 b2])<line_sep>rm._check_invariants()<assert_stmt>len(rm.free_regs)<eq>2<assert_stmt>len(rm.reg_bindings)<eq>2<line_sep>rm._check_invariants()<line_sep>rm.next_instruction()<line_sep>rm.possibly_free_vars([b0 b1 b2])<line_sep>rm._check_invariants()<assert_stmt>len(rm.free_regs)<eq>4<assert_stmt>len(rm.reg_bindings)<eq>0<block_end><def_stmt>test_register_exhaustion self<block_start>boxes,longevity=boxes_and_longevity(5)<line_sep>rm=RegisterManager(longevity)<line_sep>rm.next_instruction()<for_stmt>b boxes[:len(regs)]<block_start><assert_stmt>rm.try_allocate_reg(b)<block_end><assert_stmt>rm.try_allocate_reg(boxes[-1])<is><none><line_sep>rm._check_invariants()<block_end><def_stmt>test_need_lower_byte self<block_start>boxes,longevity=boxes_and_longevity(5)<line_sep>b0,b1,b2,b3,b4=boxes<class_stmt>XRegisterManager(RegisterManager)<block_start>no_lower_byte_regs=[r2 r3]<block_end>rm=XRegisterManager(longevity)<line_sep>rm.next_instruction()<line_sep>loc0=rm.try_allocate_reg(b0 need_lower_byte=<true>)<assert_stmt>loc0<not><in>XRegisterManager.no_lower_byte_regs<line_sep>loc=rm.try_allocate_reg(b1 need_lower_byte=<true>)<assert_stmt>loc<not><in>XRegisterManager.no_lower_byte_regs<line_sep>loc=rm.try_allocate_reg(b2 need_lower_byte=<true>)<assert_stmt>loc<is><none><line_sep>loc=rm.try_allocate_reg(b0 need_lower_byte=<true>)<assert_stmt>loc<is>loc0<line_sep>rm._check_invariants()<block_end><def_stmt>test_specific_register self<block_start>boxes,longevity=boxes_and_longevity(5)<line_sep>rm=RegisterManager(longevity)<line_sep>rm.next_instruction()<line_sep>loc=rm.try_allocate_reg(boxes[0] selected_reg=r1)<assert_stmt>loc<is>r1<line_sep>loc=rm.try_allocate_reg(boxes[1] selected_reg=r1)<assert_stmt>loc<is><none><line_sep>rm._check_invariants()<line_sep>loc=rm.try_allocate_reg(boxes[0] selected_reg=r1)<assert_stmt>loc<is>r1<line_sep>loc=rm.try_allocate_reg(boxes[0] selected_reg=r2)<assert_stmt>loc<is>r2<line_sep>rm._check_invariants()<block_end><def_stmt>test_force_allocate_reg self<block_start>boxes,longevity=boxes_and_longevity(5)<line_sep>b0,b1,b2,b3,b4=boxes<line_sep>fm=TFrameManager()<class_stmt>XRegisterManager(RegisterManager)<block_start>no_lower_byte_regs=[r2 r3]<block_end>rm=XRegisterManager(longevity frame_manager=fm assembler=MockAsm())<line_sep>rm.next_instruction()<line_sep>loc=rm.force_allocate_reg(b0)<assert_stmt>isinstance(loc FakeReg)<line_sep>loc=rm.force_allocate_reg(b1)<assert_stmt>isinstance(loc FakeReg)<line_sep>loc=rm.force_allocate_reg(b2)<assert_stmt>isinstance(loc FakeReg)<line_sep>loc=rm.force_allocate_reg(b3)<assert_stmt>isinstance(loc FakeReg)<line_sep>loc=rm.force_allocate_reg(b4)<assert_stmt>isinstance(loc FakeReg)<line_sep># one of those should be now somewhere else locs=[rm.loc(b)<for>b boxes]<line_sep>used_regs=[loc<for>loc locs<if>isinstance(loc FakeReg)]<assert_stmt>len(used_regs)<eq>len(regs)<line_sep>loc=rm.force_allocate_reg(b0 need_lower_byte=<true>)<assert_stmt>isinstance(loc FakeReg)<assert_stmt>loc<not><in>[r2 
r3]<line_sep>rm._check_invariants()<block_end><def_stmt>test_make_sure_var_in_reg self<block_start>boxes,longevity=boxes_and_longevity(5)<line_sep>fm=TFrameManager()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=MockAsm())<line_sep>rm.next_instruction()<line_sep># allocate a stack position b0,b1,b2,b3,b4=boxes<line_sep>sp=fm.loc(b0)<assert_stmt>sp.pos<eq>0<line_sep>loc=rm.make_sure_var_in_reg(b0)<assert_stmt>isinstance(loc FakeReg)<line_sep>rm._check_invariants()<block_end><def_stmt>test_bogus_make_sure_var_in_reg self<block_start>b0,=newboxes(0)<line_sep>longevity={b0:Lifetime(0 1)}<line_sep>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>rm.next_instruction()<line_sep># invalid call to make_sure_var_in_reg(): box unknown so far py.test.raises(KeyError rm.make_sure_var_in_reg b0)<block_end><def_stmt>test_return_constant self<block_start>asm=MockAsm()<line_sep>boxes,longevity=boxes_and_longevity(5)<line_sep>fm=TFrameManager()<line_sep>rm=RegisterManager(longevity assembler=asm frame_manager=fm)<line_sep>rm.next_instruction()<line_sep>loc=rm.return_constant(ConstInt(1) selected_reg=r1)<assert_stmt>loc<is>r1<line_sep>loc=rm.return_constant(ConstInt(1) selected_reg=r1)<assert_stmt>loc<is>r1<line_sep>loc=rm.return_constant(ConstInt(1))<assert_stmt>isinstance(loc ConstInt)<for_stmt>box boxes[:-1]<block_start>rm.force_allocate_reg(box)<block_end><assert_stmt>len(asm.moves)<eq>2# Const(1) -> r1, twice <assert_stmt>len(rm.reg_bindings)<eq>4<line_sep>rm._check_invariants()<block_end><def_stmt>test_loc_of_const self<block_start>rm=RegisterManager({})<line_sep>rm.next_instruction()<assert_stmt>isinstance(rm.loc(ConstInt(1)) ConstInt)<block_end><def_stmt>test_call_support self<block_start><class_stmt>XRegisterManager(RegisterManager)<block_start>save_around_call_regs=[r1 r2]<def_stmt>call_result_location self v<block_start><return>r1<block_end><block_end>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>boxes,longevity=boxes_and_longevity(5)<line_sep>rm=XRegisterManager(longevity frame_manager=fm assembler=asm)<for_stmt>b boxes[:-1]<block_start>rm.force_allocate_reg(b)<block_end>rm.position=0<line_sep>rm.before_call()<assert_stmt>len(rm.reg_bindings)<eq>2<assert_stmt>fm.get_frame_depth()<eq>2<assert_stmt>len(asm.moves)<eq>2<line_sep>rm._check_invariants()<line_sep>rm.after_call(boxes[-1])<assert_stmt>len(rm.reg_bindings)<eq>3<line_sep>rm._check_invariants()<block_end><def_stmt>test_call_support_save_all_regs self<block_start><class_stmt>XRegisterManager(RegisterManager)<block_start>save_around_call_regs=[r1 r2]<def_stmt>call_result_location self v<block_start><return>r1<block_end><block_end>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>boxes,longevity=boxes_and_longevity(5)<line_sep>rm=XRegisterManager(longevity frame_manager=fm assembler=asm)<for_stmt>b boxes[:-1]<block_start>rm.force_allocate_reg(b)<block_end>rm.before_call(save_all_regs=<true>)<assert_stmt>len(rm.reg_bindings)<eq>0<assert_stmt>fm.get_frame_depth()<eq>4<assert_stmt>len(asm.moves)<eq>4<line_sep>rm._check_invariants()<line_sep>rm.after_call(boxes[-1])<assert_stmt>len(rm.reg_bindings)<eq>1<line_sep>rm._check_invariants()<block_end><def_stmt>test_different_frame_width self<block_start><class_stmt>XRegisterManager(RegisterManager)<block_start><pass><block_end>fm=TFrameManager()<line_sep>b0=InputArgInt()<line_sep>longevity={b0:Lifetime(0 1)}<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm 
assembler=asm)<line_sep>f0=InputArgFloat()<line_sep>longevity={f0:Lifetime(0 1)}<line_sep>xrm=XRegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>xrm.loc(f0)<line_sep>rm.loc(b0)<assert_stmt>fm.get_frame_depth()<eq>3<block_end><def_stmt>test_spilling self<block_start>b0,b1,b2,b3,b4,b5=newboxes(0 1 2 3 4 5)<line_sep>longevity={b0:Lifetime(0 3) b1:Lifetime(0 3) b3:Lifetime(0 5) b2:Lifetime(0 2) b4:Lifetime(1 4) b5:Lifetime(1 3)}<line_sep>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>rm.next_instruction()<for_stmt>b b0 b1 b2 b3<block_start>rm.force_allocate_reg(b)<block_end><assert_stmt>len(rm.free_regs)<eq>0<line_sep>rm.next_instruction()<line_sep>loc=rm.loc(b3)<line_sep>spilled=rm.force_allocate_reg(b4)<assert_stmt>spilled<is>loc<line_sep>spilled2=rm.force_allocate_reg(b5)<assert_stmt>spilled2<is>loc<line_sep>rm._check_invariants()<block_end><def_stmt>test_spilling_furthest_next_real_use self<block_start>b0,b1,b2,b3,b4,b5=newboxes(0 1 2 3 4 5)<line_sep>longevity={b0:Lifetime(0 3 [1 2 3]) b1:Lifetime(0 3 [3]) b3:Lifetime(0 4 [1 2 3 4]) b2:Lifetime(0 2) b4:Lifetime(1 4) b5:Lifetime(1 3)}<line_sep>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>rm.next_instruction()<for_stmt>b b0 b1 b2 b3<block_start>rm.force_allocate_reg(b)<block_end><assert_stmt>len(rm.free_regs)<eq>0<line_sep>rm.next_instruction()<line_sep>loc=rm.loc(b1)<line_sep>spilled=rm.force_allocate_reg(b4)<assert_stmt>spilled<is>loc<line_sep>spilled2=rm.force_allocate_reg(b5)<assert_stmt>spilled2<is>loc<line_sep>rm._check_invariants()<block_end><def_stmt>test_spill_useless_vars_first self<block_start>b0,b1,b2,b3,b4,b5=newboxes(0 1 2 3 4 5)<line_sep>longevity={b0:Lifetime(0 5) b1:Lifetime(0 10) # b2 and b3 become useless but b3 lives longer b3:Lifetime(0 7 3) b2:Lifetime(0 6 3) b4:Lifetime(4 5) b5:Lifetime(4 7)}<line_sep>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>rm.next_instruction()<for_stmt>b b0 b1 b2 b3<block_start>rm.force_allocate_reg(b)<block_end>rm.position=4<assert_stmt>len(rm.free_regs)<eq>0<line_sep>loc=rm.loc(b3)<line_sep>spilled=rm.force_allocate_reg(b4)<assert_stmt>spilled<is>loc<line_sep>loc=rm.loc(b2)<line_sep>spilled2=rm.force_allocate_reg(b5)<assert_stmt>spilled2<is>loc<line_sep>rm._check_invariants()<block_end><def_stmt>test_hint_frame_locations_1 self<block_start><for_stmt>hint_value range(11)<block_start>b0,=newboxes(0)<line_sep>fm=TFrameManager()<line_sep>fm.hint_frame_pos[b0]=hint_value<line_sep>blist=newboxes(*range(10))<for_stmt>b1 blist<block_start>fm.loc(b1)<block_end><for_stmt>b1 blist<block_start>fm.mark_as_free(b1)<block_end><assert_stmt>fm.get_frame_depth()<eq>10<line_sep>loc=fm.loc(b0)<if_stmt>hint_value<l>10<block_start>expected=hint_value<block_end><else_stmt><block_start>expected=0<block_end><assert_stmt>fm.get_loc_index(loc)<eq>expected<assert_stmt>fm.get_frame_depth()<eq>10<block_end><block_end><def_stmt>test_linkedlist self<block_start><class_stmt>Loc(object)<block_start><def_stmt>__init__ self pos size tp<block_start>self.pos=pos<line_sep>self.size=size<line_sep>self.tp=tp<block_end><block_end><class_stmt>FrameManager(object)<block_start>@staticmethod<def_stmt>get_loc_index item<block_start><return>item.pos<block_end>@staticmethod<def_stmt>frame_pos pos 
tp<block_start><if_stmt>tp<eq>13<block_start>size=2<block_end><else_stmt><block_start>size=1<block_end><return>Loc(pos size tp)<block_end><block_end>fm=FrameManager()<line_sep>l=LinkedList(fm)<line_sep>l.append(1 Loc(1 1 0))<line_sep>l.append(1 Loc(4 1 0))<line_sep>l.append(1 Loc(2 1 0))<line_sep>l.append(1 Loc(0 1 0))<assert_stmt>l.master_node.val<eq>0<assert_stmt>l.master_node.next.val<eq>1<assert_stmt>l.master_node.next.next.val<eq>2<assert_stmt>l.master_node.next.next.next.val<eq>4<assert_stmt>l.master_node.next.next.next.next<is><none><line_sep>item=l.pop(1 0)<assert_stmt>item.pos<eq>0<line_sep>item=l.pop(1 0)<assert_stmt>item.pos<eq>1<line_sep>item=l.pop(1 0)<assert_stmt>item.pos<eq>2<line_sep>item=l.pop(1 0)<assert_stmt>item.pos<eq>4<assert_stmt>l.pop(1 0)<is><none><line_sep>l.append(1 Loc(1 1 0))<line_sep>l.append(1 Loc(5 1 0))<line_sep>l.append(1 Loc(2 1 0))<line_sep>l.append(1 Loc(0 1 0))<line_sep>item=l.pop(2 13)<assert_stmt>item.tp<eq>13<assert_stmt>item.pos<eq>0<assert_stmt>item.size<eq>2<assert_stmt>l.pop(2 0)<is><none># 2 and 4 l.append(1 Loc(4 1 0))<line_sep>item=l.pop(2 13)<assert_stmt>item.pos<eq>4<assert_stmt>item.size<eq>2<assert_stmt>l.pop(1 0).pos<eq>2<assert_stmt>l.pop(1 0)<is><none><line_sep>l.append(2 Loc(1 2 0))<line_sep># this will not work because the result will be odd <assert_stmt>l.pop(2 13)<is><none><line_sep>l.append(1 Loc(3 1 0))<line_sep>item=l.pop(2 13)<assert_stmt>item.pos<eq>2<assert_stmt>item.tp<eq>13<assert_stmt>item.size<eq>2<block_end><def_stmt>test_frame_manager_basic_equal self<block_start>b0,b1=newboxes(0 1)<line_sep>fm=TFrameManagerEqual()<line_sep>loc0=fm.loc(b0)<assert_stmt>fm.get_loc_index(loc0)<eq>0<line_sep># <assert_stmt>fm.get(b1)<is><none><line_sep>loc1=fm.loc(b1)<assert_stmt>fm.get_loc_index(loc1)<eq>1<assert_stmt>fm.get(b1)<eq>loc1<line_sep># loc0b=fm.loc(b0)<assert_stmt>loc0b<eq>loc0<line_sep># fm.loc(InputArgInt())<assert_stmt>fm.get_frame_depth()<eq>3<line_sep># f0=InputArgFloat()<line_sep>locf0=fm.loc(f0)<assert_stmt>fm.get_loc_index(locf0)<eq>3<assert_stmt>fm.get_frame_depth()<eq>4<line_sep># f1=InputArgFloat()<line_sep>locf1=fm.loc(f1)<assert_stmt>fm.get_loc_index(locf1)<eq>4<assert_stmt>fm.get_frame_depth()<eq>5<line_sep>fm.mark_as_free(b1)<assert_stmt>fm.freelist<line_sep>b2=InputArgInt()<line_sep>fm.loc(b2)# should be in the same spot as b1 before <assert_stmt>fm.get(b1)<is><none><assert_stmt>fm.get(b2)<eq>loc1<line_sep>fm.mark_as_free(b0)<line_sep>p0=InputArgRef()<line_sep>ploc=fm.loc(p0)<assert_stmt>fm.get_loc_index(ploc)<eq>0<assert_stmt>fm.get_frame_depth()<eq>5<assert_stmt>ploc<ne>loc1<line_sep>p1=InputArgRef()<line_sep>p1loc=fm.loc(p1)<assert_stmt>fm.get_loc_index(p1loc)<eq>5<assert_stmt>fm.get_frame_depth()<eq>6<line_sep>fm.mark_as_free(p0)<line_sep>p2=InputArgRef()<line_sep>p2loc=fm.loc(p2)<assert_stmt>p2loc<eq>ploc<assert_stmt>len(fm.freelist)<eq>0<for_stmt>box fm.bindings.keys()<block_start>fm.mark_as_free(box)<block_end>fm.bind(InputArgRef() FakeFramePos(3 'r'))<assert_stmt>len(fm.freelist)<eq>6<block_end><def_stmt>test_frame_manager_basic self<block_start>b0,b1=newboxes(0 1)<line_sep>fm=TFrameManager()<line_sep>loc0=fm.loc(b0)<assert_stmt>fm.get_loc_index(loc0)<eq>0<line_sep># <assert_stmt>fm.get(b1)<is><none><line_sep>loc1=fm.loc(b1)<assert_stmt>fm.get_loc_index(loc1)<eq>1<assert_stmt>fm.get(b1)<eq>loc1<line_sep># loc0b=fm.loc(b0)<assert_stmt>loc0b<eq>loc0<line_sep># fm.loc(InputArgInt())<assert_stmt>fm.get_frame_depth()<eq>3<line_sep># f0=InputArgFloat()<line_sep>locf0=fm.loc(f0)<line_sep># can't be odd 
<assert_stmt>fm.get_loc_index(locf0)<eq>4<assert_stmt>fm.get_frame_depth()<eq>6<line_sep># f1=InputArgFloat()<line_sep>locf1=fm.loc(f1)<assert_stmt>fm.get_loc_index(locf1)<eq>6<assert_stmt>fm.get_frame_depth()<eq>8<line_sep>fm.mark_as_free(b1)<assert_stmt>fm.freelist<line_sep>b2=InputArgInt()<line_sep>fm.loc(b2)# should be in the same spot as b1 before <assert_stmt>fm.get(b1)<is><none><assert_stmt>fm.get(b2)<eq>loc1<line_sep>fm.mark_as_free(b0)<line_sep>p0=InputArgRef()<line_sep>ploc=fm.loc(p0)<assert_stmt>fm.get_loc_index(ploc)<eq>0<assert_stmt>fm.get_frame_depth()<eq>8<assert_stmt>ploc<ne>loc1<line_sep>p1=InputArgRef()<line_sep>p1loc=fm.loc(p1)<assert_stmt>fm.get_loc_index(p1loc)<eq>3<assert_stmt>fm.get_frame_depth()<eq>8<line_sep>fm.mark_as_free(p0)<line_sep>p2=InputArgRef()<line_sep>p2loc=fm.loc(p2)<assert_stmt>p2loc<eq>ploc<assert_stmt>len(fm.freelist)<eq>0<line_sep>fm.mark_as_free(b2)<line_sep>f3=InputArgFloat()<line_sep>fm.mark_as_free(p2)<line_sep>floc=fm.loc(f3)<assert_stmt>fm.get_loc_index(floc)<eq>0<for_stmt>box fm.bindings.keys()<block_start>fm.mark_as_free(box)<block_end><block_end><block_end><class_stmt>TestForceResultInReg(object)# use it's own class since there are so many cases <block_start><def_stmt>test_force_result_in_reg_1 self# var in reg, dies <block_start>b0,b1=newboxes(0 0)<line_sep>longevity={b0:Lifetime(0 1) b1:Lifetime(1 3)}<line_sep>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>rm.next_instruction()<line_sep>loc0=rm.force_allocate_reg(b0)<line_sep>rm._check_invariants()<line_sep>rm.next_instruction()<line_sep>loc=rm.force_result_in_reg(b1 b0)<assert_stmt>loc<is>loc0<assert_stmt>len(asm.moves)<eq>0<line_sep>rm._check_invariants()<block_end><def_stmt>test_force_result_in_reg_2 self# var in reg, survives <block_start>b0,b1=newboxes(0 0)<line_sep>longevity={b0:Lifetime(0 2) b1:Lifetime(1 3)}<line_sep>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>rm.next_instruction()<line_sep>loc0=rm.force_allocate_reg(b0)<line_sep>rm._check_invariants()<line_sep>rm.next_instruction()<line_sep>loc=rm.force_result_in_reg(b1 b0)<assert_stmt>loc<is><not>loc0<assert_stmt>rm.loc(b0)<is>loc0<assert_stmt>len(asm.moves)<eq>1<line_sep>rm._check_invariants()<block_end><def_stmt>test_force_result_in_reg_3 self# var in reg, survives, no free registers <block_start>b0,b1,b2,b3,b4=newboxes(0 0 0 0 0)<line_sep>longevity={b0:Lifetime(0 2) b1:Lifetime(0 2) b3:Lifetime(0 2) b2:Lifetime(0 2) b4:Lifetime(1 3)}<line_sep>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>rm.next_instruction()<for_stmt>b b0 b1 b2 b3<block_start>rm.force_allocate_reg(b)<block_end><assert_stmt><not>len(rm.free_regs)<line_sep>rm._check_invariants()<line_sep>rm.next_instruction()<line_sep>rm.force_result_in_reg(b4 b0)<line_sep>rm._check_invariants()<assert_stmt>len(asm.moves)<eq>1<block_end><def_stmt>test_force_result_in_reg_4 self<block_start>b0,b1=newboxes(0 0)<line_sep>longevity={b0:Lifetime(0 1) b1:Lifetime(0 1)}<line_sep>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>rm.next_instruction()<line_sep>fm.loc(b0)<line_sep>rm.force_result_in_reg(b1 b0)<line_sep>rm._check_invariants()<line_sep>loc=rm.loc(b1)<assert_stmt>isinstance(loc FakeReg)<line_sep>loc=rm.loc(b0)<assert_stmt>isinstance(loc 
FakeFramePos)<assert_stmt>len(asm.moves)<eq>1<block_end><def_stmt>test_force_result_in_reg_const self# const <block_start>boxes,longevity=boxes_and_longevity(2)<line_sep>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>rm.next_instruction()<line_sep>c=ConstInt(0)<line_sep>rm.force_result_in_reg(boxes[0] c)<line_sep>rm._check_invariants()<block_end># some tests where the result is supposed to go in a fixed register <def_stmt>test_force_result_in_reg_fixed_reg_1 self# var in reg, dies <block_start>b0,b1=newboxes(0 0)<line_sep>longevity=LifetimeManager({b0:Lifetime(0 1) b1:Lifetime(1 3)})<line_sep>longevity.try_use_same_register(b0 b1)<line_sep>longevity.fixed_register(1 r1 b1)<line_sep>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>rm.next_instruction()<line_sep>loc0=rm.force_allocate_reg(b0)<line_sep>rm._check_invariants()<line_sep>rm.next_instruction()<line_sep>loc=rm.force_result_in_reg(b1 b0)<assert_stmt>loc<is>loc0<assert_stmt>loc<is>r1<assert_stmt>len(asm.moves)<eq>0<line_sep>rm._check_invariants()<block_end><def_stmt>test_force_result_in_reg_fixed_reg_2 self# var in reg, survives <block_start>b0,b1=newboxes(0 0)<line_sep>longevity=LifetimeManager({b0:Lifetime(0 2) b1:Lifetime(1 3)})<line_sep># has no effect, lifetimes overlap longevity.try_use_same_register(b0 b1)<line_sep>longevity.fixed_register(1 r1 b1)<line_sep>fm=TFrameManager()<line_sep>asm=MockAsm()<line_sep>rm=RegisterManager(longevity frame_manager=fm assembler=asm)<line_sep>rm.next_instruction()<line_sep>loc0=rm.force_allocate_reg(b0)<line_sep>rm._check_invariants()<line_sep>rm.next_instruction()<line_sep>loc=rm.force_result_in_reg(b1 b0)<assert_stmt>loc<is><not>loc0<assert_stmt>rm.loc(b0)<is>loc0<assert_stmt>loc<is>r1<assert_stmt>len(asm.moves)<eq>1<line_sep>rm._check_invariants()<block_end><block_end># _____________________________________________________ # tests that assign registers in a mocked way for a fake CPU <class_stmt>RegisterManager2(BaseRegMan)<block_start>all_regs=[r0 r1 r2 r3 r4 r5 r6 r7]<line_sep>save_around_call_regs=[r0 r1 r2 r3]<line_sep>frame_reg=r8<line_sep># calling conventions: r0 is result # r1 r2 r3 are arguments and caller-saved registers # r4 r5 r6 r7 are callee-saved registers <def_stmt>convert_to_imm self v<block_start><return>v.value<block_end><def_stmt>call_result_location self v<block_start><return>r0<block_end><block_end><class_stmt>FakeRegalloc(BaseRegalloc)<block_start><def_stmt>__init__ self<block_start>self.assembler=MockAsm()<block_end><def_stmt>fake_prepare_loop self inputargs operations looptoken inputarg_locs=<none><block_start>operations=self._prepare(inputargs operations [])<line_sep>self.operations=operations<if_stmt>inputarg_locs<is><none><block_start>self._set_initial_bindings(inputargs looptoken)<block_end><else_stmt><block_start><for_stmt>v,loc zip(inputargs inputarg_locs)<block_start>self.rm.reg_bindings[v]=loc<line_sep>self.rm.free_regs.remove(loc)<block_end><block_end>self.possibly_free_vars(list(inputargs))<line_sep>self._add_fixed_registers()<line_sep><return>operations<block_end><def_stmt>_prepare self inputargs operations allgcrefs<block_start>self.fm=TFrameManager()<line_sep># compute longevity of variables longevity=compute_vars_longevity(inputargs operations)<line_sep>self.longevity=longevity<line_sep>self.rm=RegisterManager2(longevity assembler=self.assembler 
frame_manager=self.fm)<line_sep><return>operations<block_end><def_stmt>possibly_free_var self var<block_start>self.rm.possibly_free_var(var)<block_end><def_stmt>possibly_free_vars self vars<block_start><for_stmt>var vars<block_start><if_stmt>var<is><not><none># xxx kludgy <block_start>self.possibly_free_var(var)<block_end><block_end><block_end><def_stmt>possibly_free_vars_for_op self op<block_start><for_stmt>i range(op.numargs())<block_start>var=op.getarg(i)<if_stmt>var<is><not><none># xxx kludgy <block_start>self.possibly_free_var(var)<block_end><block_end><if_stmt>op.type<ne>'v'<block_start>self.possibly_free_var(op)<block_end><block_end><def_stmt>loc self x<block_start><return>self.rm.loc(x)<block_end><def_stmt>force_allocate_reg_or_cc self var<block_start><assert_stmt>var.type<eq>INT<if_stmt>self.next_op_can_accept_cc(self.operations self.rm.position)# hack: return the ebp location to mean "lives in CC". This # ebp will not actually be used, and the location will be freed # after the next op as usual. <block_start>self.rm.force_allocate_frame_reg(var)<line_sep><return>r8<block_end><else_stmt># else, return a regular register (not ebp). <block_start><return>self.rm.force_allocate_reg(var need_lower_byte=<true>)<block_end><block_end><def_stmt>fake_allocate self loop<block_start><import_from_stmt>rpython.jit.backend.x86.jump remap_frame_layout<def_stmt>emit *args<block_start>self.assembler.emitted.append(args)<block_end><for_stmt>i,op enumerate(loop.operations)<block_start>self.rm.position=i<line_sep>opnum=op.getopnum()<line_sep>opname=op.getopname()<if_stmt>rop.is_comparison(opnum)<block_start>locs=[self.loc(x)<for>x op.getarglist()]<line_sep>loc=self.force_allocate_reg_or_cc(op)<line_sep>emit(opname loc locs)<block_end><elif_stmt>opname.startswith("int_")<block_start>locs=[self.loc(x)<for>x op.getarglist()]<line_sep>loc=self.rm.force_result_in_reg(op op.getarg(0) op.getarglist())<line_sep>emit(opname loc locs[1:])<block_end><elif_stmt>op.is_guard()<block_start>fail_locs=[self.loc(x)<for>x op.getfailargs()]<line_sep>emit(opname self.loc(op.getarg(0)) fail_locs)<block_end><elif_stmt>rop.is_call(opnum)# calling convention! 
<block_start>src_locs=[self.loc(x)<for>x op.getarglist()[1:]]<line_sep>self.rm.before_call()<line_sep>loc=self.rm.after_call(op)<line_sep>dst_locs=[r1 r2 r3][:len(src_locs)]<line_sep>remap_frame_layout(self.assembler src_locs dst_locs r8)<line_sep>emit(opname loc dst_locs)<block_end><elif_stmt>opname<eq>"label"<block_start>descr=op.getdescr()<line_sep>locs=[self.loc(x)<for>x op.getarglist()]<line_sep>emit(opname locs)<line_sep>descr._fake_arglocs=locs<line_sep>lastop=loop.operations[-1]<if_stmt>lastop.getopname()<eq>"jump"<and>lastop.getdescr()<is>descr# now we know the places, add hints <block_start><for_stmt>i,r enumerate(locs)<block_start><if_stmt>isinstance(r FakeReg)<block_start>self.longevity.fixed_register(len(loop.operations)-1 r lastop.getarg(i))<block_end><block_end><block_end><block_end><elif_stmt>opname<eq>"jump"<block_start>src_locs=[self.loc(x)<for>x op.getarglist()]<line_sep>dst_locs=op.getdescr()._fake_arglocs<line_sep>remap_frame_layout(self.assembler src_locs dst_locs r8)<line_sep>emit("jump" dst_locs)<block_end><else_stmt><block_start>locs=[self.loc(x)<for>x op.getarglist()]<if_stmt>op.type<ne>"v"<block_start>loc=self.rm.force_allocate_reg(op)<line_sep>emit(opname loc locs)<block_end><else_stmt><block_start>emit(opname locs)<block_end><block_end>self.possibly_free_vars_for_op(op)<block_end><return>self.assembler.emitted<block_end><def_stmt>_add_fixed_registers self<block_start><for_stmt>i,op enumerate(self.operations)<block_start>opnum=op.getopnum()<line_sep>opname=op.getopname()<line_sep>args=op.getarglist()<if_stmt>rop.is_call(opnum)# calling convention! <block_start>arglist=op.getarglist()[1:]<for_stmt>arg,reg zip(arglist+[<none>]<times>(3-len(arglist)) [r1 r2 r3])<block_start>self.longevity.fixed_register(i reg arg)<block_end>self.longevity.fixed_register(i r0 op)<block_end><elif_stmt>opname.startswith("int_")<block_start><if_stmt><not>args[0].is_constant()<block_start>self.longevity.try_use_same_register(args[0] op)<block_end><block_end><block_end><block_end><block_end>CPU=getcpuclass()<class_stmt>TestFullRegallocFakeCPU(object)# XXX copy-paste from test_regalloc_integration <block_start>cpu=CPU(<none> <none>)<line_sep>cpu.setup_once()<line_sep>targettoken=TargetToken()<line_sep>targettoken2=TargetToken()<line_sep>fdescr1=BasicFailDescr(1)<line_sep>fdescr2=BasicFailDescr(2)<line_sep>fdescr3=BasicFailDescr(3)<def_stmt>setup_method self meth<block_start>self.targettoken._ll_loop_code=0<line_sep>self.targettoken2._ll_loop_code=0<block_end><def_stmt>f1 x<block_start><return>x+1<block_end><def_stmt>f2 x y<block_start><return>x<times>y<block_end><def_stmt>f10 *args<block_start><assert_stmt>len(args)<eq>10<line_sep><return>sum(args)<block_end>F1PTR=lltype.Ptr(lltype.FuncType([lltype.Signed] lltype.Signed))<line_sep>F2PTR=lltype.Ptr(lltype.FuncType([lltype.Signed]<times>2 lltype.Signed))<line_sep>F10PTR=lltype.Ptr(lltype.FuncType([lltype.Signed]<times>10 lltype.Signed))<line_sep>f1ptr=llhelper(F1PTR f1)<line_sep>f2ptr=llhelper(F2PTR f2)<line_sep>f10ptr=llhelper(F10PTR f10)<line_sep>f1_calldescr=cpu.calldescrof(F1PTR.TO F1PTR.TO.ARGS F1PTR.TO.RESULT EffectInfo.MOST_GENERAL)<line_sep>f2_calldescr=cpu.calldescrof(F2PTR.TO F2PTR.TO.ARGS F2PTR.TO.RESULT EffectInfo.MOST_GENERAL)<line_sep>f10_calldescr=cpu.calldescrof(F10PTR.TO F10PTR.TO.ARGS F10PTR.TO.RESULT EffectInfo.MOST_GENERAL)<line_sep>namespace=locals().copy()<def_stmt>parse self s boxkinds=<none> namespace=<none><block_start><return>parse(s self.cpu namespace<or>self.namespace 
boxkinds=boxkinds)<block_end><def_stmt>allocate self s inputarg_locs=<none><block_start>loop=self.parse(s)<line_sep>self.loop=loop<line_sep>regalloc=FakeRegalloc()<line_sep>regalloc.fake_prepare_loop(loop.inputargs loop.operations loop.original_jitcell_token inputarg_locs)<line_sep>self.regalloc=regalloc<line_sep><return>regalloc.fake_allocate(loop)<block_end><def_stmt>test_simple self<block_start>ops=''' [i0] label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_lt(i1, 20) guard_true(i2) [i1] jump(i1, descr=targettoken) '''<line_sep>emitted=self.allocate(ops)<line_sep>fp0=FakeFramePos(0 INT)<assert_stmt>emitted<eq>[("label" [fp0]) ("move" r0 fp0) ("int_add" r0 [1]) ("int_lt" r8 [r0 20]) ("guard_true" r8 [r0]) ("move" fp0 r0) ("jump" [fp0]) ]<block_end><def_stmt>test_call self<block_start>ops=''' [i0] i1 = int_mul(i0, 2) i2 = call_i(ConstClass(f1ptr), i1, descr=f1_calldescr) guard_false(i2) [] '''<line_sep>emitted=self.allocate(ops)<line_sep>fp0=FakeFramePos(0 INT)<assert_stmt>emitted<eq>[("move" r1 fp0) ("int_mul" r1 [2]) ("call_i" r0 [r1]) ("guard_false" r0 []) ]<block_end><def_stmt>test_call_2 self<block_start>ops=''' [i0, i1] i2 = int_mul(i0, 2) i3 = int_add(i1, 1) i4 = call_i(ConstClass(f1ptr), i2, descr=f1_calldescr) guard_false(i4) [i3] '''<line_sep>emitted=self.allocate(ops)<line_sep>fp0=FakeFramePos(0 INT)<line_sep>fp1=FakeFramePos(1 INT)<assert_stmt>emitted<eq>[("move" r1 fp0) ("int_mul" r1 [2]) ("move" r4 fp1) # r4 gets picked since it's callee-saved ("int_add" r4 [1]) ("call_i" r0 [r1]) ("guard_false" r0 [r4]) ]<block_end><def_stmt>test_coalescing self<block_start>ops=''' [i0] i1 = int_mul(i0, 5) i5 = int_is_true(i1) guard_true(i5) [] i2 = int_mul(i0, 2) i3 = int_add(i2, 1) # i2 and i3 need to be coalesced i4 = call_i(ConstClass(f1ptr), i3, descr=f1_calldescr) guard_false(i4) [] '''<line_sep>emitted=self.allocate(ops)<line_sep>fp0=FakeFramePos(0 INT)<assert_stmt>emitted<eq>[('move' r1 fp0) ('int_mul' r1 [5]) ('int_is_true' r8 [r1]) ('guard_true' r8 []) ('move' r1 fp0) ('int_mul' r1 [2]) ('int_add' r1 [1]) ('call_i' r0 [r1]) ('guard_false' r0 [])]<block_end><def_stmt>test_specify_inputarg_locs self<block_start>ops=''' [i0] i1 = int_mul(i0, 5) i5 = int_is_true(i1) guard_true(i5) [] '''<line_sep>emitted=self.allocate(ops [r0])<assert_stmt>emitted<eq>[('int_mul' r0 [5]) ('int_is_true' r8 [r0]) ('guard_true' r8 [])]<block_end><def_stmt>test_coalescing_first_var_already_in_different_reg self<block_start>ops=''' [i0] i2 = int_mul(i0, 2) i3 = int_add(i2, 1) # i2 and i3 need to be coalesced i4 = call_i(ConstClass(f1ptr), i3, descr=f1_calldescr) guard_false(i4) [i0] '''<line_sep>emitted=self.allocate(ops [r5])<assert_stmt>emitted<eq>[('move' r1 r5) ('int_mul' r1 [2]) ('int_add' r1 [1]) ('call_i' r0 [r1]) ('guard_false' r0 [r5])]<block_end><def_stmt>test_call_spill_furthest_use self# here, i2 should be spilled, because its use is farther away <block_start>ops=''' [i0, i1, i2, i3, i4, i5, i6] i8 = call_i(ConstClass(f2ptr), i0, i1, descr=f2_calldescr) escape_i(i3) escape_i(i2) guard_false(i8) [i2, i3, i4, i5, i6] '''<line_sep>emitted=self.allocate(ops [r1 r2 r0 r3 r4 r5 r6])<line_sep>fp0=FakeFramePos(0 INT)<assert_stmt>emitted<eq>[('move' fp0 r0) ('move' r7 r3) ('call_i' r0 [r1 r2]) ('escape_i' r1 [r7]) ('escape_i' r1 [fp0]) ('guard_false' r0 [fp0 r7 r4 r5 r6])]<block_end>@py.test.mark.skip("messy - later")<def_stmt>test_call_spill self# i0 dies, i1 is the argument, the other fight for caller-saved regs # all_regs = [r0, r1, r2, r3, r4, r5, r6, r7] # save_around_call_regs = [r0, r1, r2, 
r3] <block_start>ops=''' [i0, i1, i2, i3, i4, i5, i6] i8 = call_i(ConstClass(f2ptr), i1, i0, descr=f2_calldescr) guard_false(i8) [i2, i3, i4, i5, i6] '''<line_sep>emitted=self.allocate(ops [r5 r1 r0 r2 r3 r6 r7])<assert_stmt>emitted<eq>["???"]<block_end><def_stmt>test_jump_hinting self<block_start>ops=''' [i0, i1] i2 = escape_i() i3 = escape_i() label(i2, i3, descr=targettoken) i4 = escape_i() i5 = escape_i() jump(i4, i5, descr=targettoken) '''<line_sep>emitted=self.allocate(ops)<assert_stmt>emitted<eq>[('escape_i' r0 []) ('escape_i' r1 []) ('label' [r0 r1]) ('escape_i' r0 []) ('escape_i' r1 []) ('jump' [r0 r1])]<block_end><block_end>
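# The spilling tests above all exercise the same eviction rule: when every
# register is taken, free the variable whose next (real) use is farthest away.
# A minimal, self-contained sketch of that heuristic, independent of the
# RegisterManager machinery (names like next_uses and pick_spill_candidate are
# illustrative, not PyPy API):

def pick_spill_candidate(in_regs, position, next_uses):
    """Pick the variable whose next use at or after `position` is farthest
    in the future; variables never used again are ideal victims."""
    def distance(var):
        future = [p for p in next_uses.get(var, []) if p >= position]
        return future[0] if future else float("inf")
    return max(in_regs, key=distance)

# Mirrors test_spilling_furthest_next_real_use: at position 1, b1 (next used
# at 3) is evicted before b0 or b3 (both next used at 1).
next_uses = {"b0": [1, 2, 3], "b1": [3], "b3": [1, 2, 3, 4]}
assert pick_spill_candidate(["b0", "b1", "b3"], 1, next_uses) == "b1"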
# Copyright 2017. <NAME>. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
<import_stmt>os<import_stmt>sys<import_stmt>h5py<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<line_sep>MAGIC_ATTR='magic'<line_sep>MAGIC_VAL=0x0A7A<line_sep>VERSION_ATTR='version'<line_sep>VERSION_NA='NA'<line_sep>VERSION_CURRENT='0.1'<try_stmt><block_start>ver_split=VERSION_CURRENT.split('.')<line_sep>VERSION_MAJOR=ver_split[0]<line_sep>VERSION_MINOR=ver_split[1]<block_end><except_stmt>(IndexError AttributeError)<as>err<block_start>VERSION_MAJOR=0<line_sep>VERSION_MINOR=1<block_end><def_stmt>listify files# TODO: change this to include any iterable datastructures (sets, panda sequences, etc)
<block_start><if_stmt><not>isinstance(files (list tuple))<block_start><return>[files]<block_end><else_stmt><block_start><return>files<block_end><block_end><def_stmt>load_h5 h5file mode='r'# TODO: Allow for h5py.Group also
<block_start><if_stmt>isinstance(h5file h5py.File)<block_start><return>h5file<block_end><return>h5py.File(h5file mode)<block_end><def_stmt>load_csv csvfile# TODO: make the separator more flexible
<block_start><if_stmt>isinstance(csvfile pd.DataFrame)<block_start><return>csvfile<block_end># TODO: check if it is csv object and convert to a pd dataframe
<return>pd.read_csv(csvfile sep=' ' na_values='NONE')<block_end><def_stmt>get_attribute_h5 h5obj attribute_name default=<none><block_start>val=h5obj.attrs.get(attribute_name default)<if_stmt>using_py3<and>isinstance(val bytes)# There is a bug with h5py returning unicode/str-based attributes as bytes
<block_start>val=val.decode()<block_end><return>val<block_end><def_stmt>check_magic hdf5_file<block_start>"""Check the magic attribute exists according to the sonata format"""<line_sep>h5_file_obj=load_h5(hdf5_file)<if_stmt>MAGIC_ATTR<not><in>h5_file_obj.attrs<block_start><raise>Exception('File {} missing top-level \"{}\" attribute.'.format(h5_file_obj.filename MAGIC_ATTR))<block_end><elif_stmt>np.uint32(get_attribute_h5(h5_file_obj MAGIC_ATTR))<ne>MAGIC_VAL<block_start><raise>Exception('File {} has unexpected magic value (expected {})'.format(h5_file_obj.filename MAGIC_VAL))<block_end><return><true><block_end><def_stmt>get_version hdf5_file<block_start>h5_file_obj=load_h5(hdf5_file)<if_stmt>VERSION_ATTR<not><in>h5_file_obj.attrs<block_start><return>VERSION_NA<block_end><else_stmt><block_start>version_val=get_attribute_h5(h5_file_obj VERSION_ATTR)<line_sep>version_str=str(version_val[0])<for_stmt>ver_sub version_val[1:]<block_start>version_str<augadd>'.{}'.format(ver_sub)<block_end><return>version_str<block_end><block_end><def_stmt>add_hdf5_magic hdf5_handle<block_start>hdf5_handle['/'].attrs[MAGIC_ATTR]=np.uint32(MAGIC_VAL)<block_end><def_stmt>add_hdf5_version hdf5_handle<block_start>hdf5_handle['/'].attrs[VERSION_ATTR]=[np.uint32(VERSION_MAJOR) np.uint32(VERSION_MINOR)]<block_end><def_stmt>get_node_ids nodes_path population# Used by PoissonSpikesGenerator
<block_start><with_stmt>h5py.File(nodes_path 'r')<as>h5<block_start>node_ids=h5['/nodes'][population]['node_id'][()]<block_end><return>node_ids<block_end><if_stmt>sys.version_info[0]<eq>3<block_start>using_py3=<true><line_sep>range_itr=range<block_end><else_stmt><block_start>using_py3=<false><line_sep>range_itr=xrange<block_end>
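# A short usage sketch for the helpers above ('network/nodes.h5' and
# 'new_nodes.h5' are hypothetical paths, not part of the module):

import h5py

# validating an existing SONATA file
with h5py.File('network/nodes.h5', 'r') as h5:
    if check_magic(h5):  # raises if the magic attribute is missing or wrong
        print('SONATA version:', get_version(h5))

# stamping a freshly created file so the checks above pass
with h5py.File('new_nodes.h5', 'w') as h5:
    add_hdf5_magic(h5)
    add_hdf5_version(h5)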
<import_stmt>unittest<import_stmt>jionlp<as>jio<class_stmt>TestTextAug(unittest.TestCase)<block_start>""" Tests for the text data augmentation tools """<def_stmt>test_ReplaceEntity self<block_start>""" test class ReplaceEntity """<line_sep># the prepared entity dictionary
entities_dict={"Person":{"马成宇":1} "Company":{"百度":4 "国力教育公司":1} "Organization":{"延平区人民法院":1}}<line_sep># the input sequence-labeling sample
text='腾讯致力于解决冲突,阿里巴巴致力于玩。小马爱玩。'<line_sep>entities=[{'type':'Company' 'text':'腾讯' 'offset':(0 2)} {'type':'Company' 'text':'阿里巴巴' 'offset':(10 14)} {'type':'Person' 'text':'小马' 'offset':(19 21)}]<line_sep>replace_entity=jio.ReplaceEntity(entities_dict)<line_sep>texts,entities=replace_entity(text entities)<line_sep># the expected results
standard_texts=['腾讯致力于解决冲突,国力教育公司致力于玩。小马爱玩。' '百度致力于解决冲突,阿里巴巴致力于玩。小马爱玩。' '腾讯致力于解决冲突,阿里巴巴致力于玩。马成宇爱玩。']<line_sep>standard_entities=[[{'type':'Company' 'text':'腾讯' 'offset':(0 2)} {'text':'国力教育公司' 'type':'Company' 'offset':[10 16]} {'text':'小马' 'type':'Person' 'offset':(21 23)}] [{'text':'百度' 'type':'Company' 'offset':[0 2]} {'text':'阿里巴巴' 'type':'Company' 'offset':(10 14)} {'text':'小马' 'type':'Person' 'offset':(19 21)}] [{'type':'Company' 'text':'腾讯' 'offset':(0 2)} {'type':'Company' 'text':'阿里巴巴' 'offset':(10 14)} {'text':'马成宇' 'type':'Person' 'offset':[19 22]}]]<line_sep>self.assertEqual(texts standard_texts)<line_sep>self.assertEqual(entities standard_entities)<block_end># def test_
<block_end>
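# The offset bookkeeping in the expected results above can be checked by hand;
# this is illustrative arithmetic only, for the first augmented sample:
# the 4-char "阿里巴巴" at (10, 14) is replaced by the 6-char "国力教育公司",
# so the replaced span becomes [10, 16] and every later offset shifts by +2.

old_text, new_text = '阿里巴巴', '国力教育公司'
shift = len(new_text) - len(old_text)        # 6 - 4 = +2
assert (10, 10 + len(new_text)) == (10, 16)  # span of the inserted entity
assert (19 + shift, 21 + shift) == (21, 23)  # "小马" moves accordingly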
<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>time<import_stmt>sys<import_stmt>os<import_stmt>shutil<import_stmt>csv<import_stmt>json<import_stmt>argparse<import_stmt>collections<import_stmt>subprocess<line_sep>subprocess.check_call([sys.executable '-m' 'pip' 'install' 'pip' '--upgrade'])<line_sep>subprocess.check_call([sys.executable '-m' 'pip' 'install' 'wrapt' '--upgrade' '--ignore-installed'])<line_sep>subprocess.check_call([sys.executable '-m' 'pip' 'install' 'tensorflow==2.1.0' '--ignore-installed'])<import_stmt>tensorflow<as>tf<line_sep>print(tf.__version__)<line_sep>subprocess.check_call([sys.executable '-m' 'pip' 'install' 'transformers==2.8.0'])<import_from_stmt>transformers DistilBertTokenizer<import_stmt>pyspark<import_from_stmt>pyspark.sql SparkSession<import_from_stmt>pyspark.ml Pipeline<import_from_stmt>pyspark.sql.functions *<import_from_stmt>pyspark.ml.linalg DenseVector<import_from_stmt>pyspark.sql.functions split<import_from_stmt>pyspark.sql.functions udf col<import_from_stmt>pyspark.sql.types *<line_sep>tokenizer=DistilBertTokenizer.from_pretrained('distilbert-base-uncased')<line_sep># We set sequences to be at most 64 tokens long.
MAX_SEQ_LENGTH=64<line_sep>DATA_COLUMN='review_body'<line_sep>LABEL_COLUMN='star_rating'<line_sep>LABEL_VALUES=[1 2 3 4 5]<line_sep>label_map={}<for_stmt>(i label) enumerate(LABEL_VALUES)<block_start>label_map[label]=i<block_end><class_stmt>InputFeatures(object)<block_start>"""BERT feature vectors."""<def_stmt>__init__ self input_ids input_mask segment_ids label_id<block_start>self.input_ids=input_ids<line_sep>self.input_mask=input_mask<line_sep>self.segment_ids=segment_ids<line_sep>self.label_id=label_id<block_end><block_end><class_stmt>Input(object)<block_start>"""A single training/test input for sequence classification."""<def_stmt>__init__ self text label=<none><block_start>"""Constructs an Input. Args: text: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """<line_sep>self.text=text<line_sep>self.label=label<block_end><block_end><def_stmt>convert_input label text# First, we need to preprocess our data so that it matches the data BERT was trained on: # # 1. Lowercase our text (if we're using a BERT lowercase model) # 2. Tokenize it (i.e. "sally says hi" -> ["sally", "says", "hi"]) # 3. Break words into WordPieces (i.e. "calling" -> ["call", "##ing"]) # # Fortunately, the Transformers tokenizer does this for us! # # tokens = tokenizer.tokenize(text_input.text) # Next, we need to do the following: # # 4. Map our words to indexes using a vocab file that BERT provides # 5. Add special "CLS" and "SEP" tokens (see the [readme](https://github.com/google-research/bert)) # 6. Append "index" and "segment" tokens to each input (see the [BERT paper](https://arxiv.org/pdf/1810.04805.pdf)) # # Again, the Transformers tokenizer does this for us! 
# <block_start>encode_plus_tokens=tokenizer.encode_plus(text pad_to_max_length=<true> max_length=MAX_SEQ_LENGTH)<line_sep># Convert the text-based tokens to ids from the pre-trained BERT vocabulary
input_ids=encode_plus_tokens['input_ids']<line_sep># Specifies which tokens BERT should pay attention to (0 or 1)
input_mask=encode_plus_tokens['attention_mask']<line_sep># Segment Ids are always 0 for single-sequence tasks (or 1 if two-sequence tasks)
segment_ids=[0]<times>MAX_SEQ_LENGTH<line_sep># Label for our training data (star_rating 1 through 5)
label_id=label_map[label]<line_sep><return>{'input_ids':input_ids 'input_mask':input_mask 'segment_ids':segment_ids 'label_ids':[label_id]}<block_end><def_stmt>list_arg raw_value<block_start>"""argparse type for a list of strings"""<line_sep><return>str(raw_value).split(',')<block_end><def_stmt>parse_args # Unlike SageMaker training jobs (which have `SM_HOSTS` and `SM_CURRENT_HOST` env vars), processing jobs need to parse the resource config file directly
<block_start>resconfig={}<try_stmt><block_start><with_stmt>open('/opt/ml/config/resourceconfig.json' 'r')<as>cfgfile<block_start>resconfig=json.load(cfgfile)<block_end><block_end><except_stmt>FileNotFoundError<block_start>print('/opt/ml/config/resourceconfig.json not found. current_host is unknown.')<line_sep><pass><block_end># Ignore
# Local testing with CLI args
parser=argparse.ArgumentParser(description='Process')<line_sep>parser.add_argument('--hosts' type=list_arg default=resconfig.get('hosts' ['unknown']) help='Comma-separated list of host names running the job')<line_sep>parser.add_argument('--current-host' type=str default=resconfig.get('current_host' 'unknown') help='Name of this host running the job')<line_sep>parser.add_argument('--input-data' type=str default='/opt/ml/processing/input/data' )<line_sep>parser.add_argument('--output-data' type=str default='/opt/ml/processing/output' )<line_sep><return>parser.parse_args()<block_end><def_stmt>transform spark s3_input_data s3_output_train_data s3_output_validation_data s3_output_test_data<block_start>print('Processing {} => {}, {}, {}'.format(s3_input_data s3_output_train_data s3_output_validation_data s3_output_test_data))<line_sep>schema=StructType([StructField('marketplace' StringType() <true>) StructField('customer_id' StringType() <true>) StructField('review_id' StringType() <true>) StructField('product_id' StringType() <true>) StructField('product_parent' StringType() <true>) StructField('product_title' StringType() <true>) StructField('product_category' StringType() <true>) StructField('star_rating' IntegerType() <true>) StructField('helpful_votes' IntegerType() <true>) StructField('total_votes' IntegerType() <true>) StructField('vine' StringType() <true>) StructField('verified_purchase' StringType() <true>) StructField('review_headline' StringType() <true>) StructField('review_body' StringType() <true>) StructField('review_date' StringType() <true>)])<line_sep>df_csv=spark.read.csv(path=s3_input_data sep='\t' schema=schema header=<true> quote=<none>)<line_sep>df_csv.show()<line_sep># This dataset should already be clean, but always good to double-check
print('Showing null review_body rows...')<line_sep>df_csv.where(col('review_body').isNull()).show()<line_sep>print('Showing cleaned csv')<line_sep>df_csv_dropped=df_csv.na.drop(subset=['review_body'])<line_sep>df_csv_dropped.show()<line_sep># TODO: Balance
features_df=df_csv_dropped.select(['star_rating' 
'review_body'])<line_sep>features_df.show()<line_sep>tfrecord_schema=StructType([StructField("input_ids" ArrayType(IntegerType() <false>)) StructField("input_mask" ArrayType(IntegerType() <false>)) StructField("segment_ids" ArrayType(IntegerType() <false>)) StructField("label_ids" ArrayType(IntegerType() <false>))])<line_sep>bert_transformer=udf(<lambda>text label:convert_input(text label) tfrecord_schema)<line_sep>spark.udf.register('bert_transformer' bert_transformer)<line_sep>transformed_df=features_df.select(bert_transformer('star_rating' 'review_body').alias('tfrecords'))<line_sep>transformed_df.show(truncate=<false>)<line_sep>flattened_df=transformed_df.select('tfrecords.*')<line_sep>flattened_df.show()<line_sep># Split 90-5-5% train_df,validation_df,test_df=flattened_df.randomSplit([0.9 0.05 0.05])<line_sep>train_df.write.format('tfrecords').option('recordType' 'Example').save(path=s3_output_train_data)<line_sep>print('Wrote to output file: {}'.format(s3_output_train_data))<line_sep>validation_df.write.format('tfrecords').option('recordType' 'Example').save(path=s3_output_validation_data)<line_sep>print('Wrote to output file: {}'.format(s3_output_validation_data))<line_sep>test_df.write.format('tfrecords').option('recordType' 'Example').save(path=s3_output_test_data)<line_sep>print('Wrote to output file: {}'.format(s3_output_test_data))<line_sep>restored_test_df=spark.read.format('tfrecords').option('recordType' 'Example').load(path=s3_output_test_data)<line_sep>restored_test_df.show()<block_end><def_stmt>main <block_start>spark=SparkSession.builder.appName('AmazonReviewsSparkProcessor').getOrCreate()<line_sep># Convert command line args into a map of args args_iter=iter(sys.argv[1:])<line_sep>args=dict(zip(args_iter args_iter))<line_sep># Retrieve the args and replace 's3://' with 's3a://' (used by Spark) s3_input_data=args['s3_input_data'].replace('s3://' 's3a://')<line_sep>print(s3_input_data)<line_sep>s3_output_train_data=args['s3_output_train_data'].replace('s3://' 's3a://')<line_sep>print(s3_output_train_data)<line_sep>s3_output_validation_data=args['s3_output_validation_data'].replace('s3://' 's3a://')<line_sep>print(s3_output_validation_data)<line_sep>s3_output_test_data=args['s3_output_test_data'].replace('s3://' 's3a://')<line_sep>print(s3_output_test_data)<line_sep>transform(spark s3_input_data '/opt/ml/processing/output/bert/train' '/opt/ml/processing/output/bert/validation' '/opt/ml/processing/output/bert/test' # s3_output_train_data, s3_output_validation_data, s3_output_test_data )<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
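# A minimal illustration of what convert_input() returns for a single review,
# assuming the transformers==2.8.0 tokenizer configured above. The exact ids
# depend on the DistilBERT vocabulary, so only shapes and invariants are
# asserted here:

example = convert_input(label=5, text='i love this product')
assert len(example['input_ids']) == MAX_SEQ_LENGTH      # padded/truncated to 64
assert set(example['input_mask']) <= {0, 1}             # 1 = real token, 0 = padding
assert example['segment_ids'] == [0] * MAX_SEQ_LENGTH   # single-sequence task
assert example['label_ids'] == [label_map[5]]           # 5 stars -> class index 4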
# -*- coding: utf-8 -*-
"""
=============================
Plotting text
=============================

To plot text, simply pass the text data to the plot function. By default, the
text samples will be transformed into a vector of word counts and then modeled
using Latent Dirichlet Allocation (# of topics = 100) with a model fit to a
large sample of wikipedia pages. If you specify semantic=None, the word count
vectors will be plotted. To convert the text to a matrix (or list of
matrices), we also expose the format_data function.
"""<line_sep># Code source: <NAME>
# License: MIT
# load hypertools
<import_stmt>hypertools<as>hyp<line_sep># load the data
data=[['i like cats alot' 'cats r pretty cool' 'cats are better than dogs'] ['dogs rule the haus' 'dogs are my jam' 'dogs are a mans best friend'] 'i haz a cheezeburger?']<line_sep># plot it
hyp.plot(data 'o')<line_sep># convert text to matrix without plotting
# mtx = hyp.tools.format_data(data, vectorizer='TfidfVectorizer', semantic='NMF')
<import_stmt>esphome.codegen<as>cg<import_stmt>esphome.config_validation<as>cv<import_from_stmt>esphome.components binary_sensor<import_from_stmt>esphome.const CONF_LIGHT CONF_MOTION CONF_TIMEOUT DEVICE_CLASS_LIGHT DEVICE_CLASS_MOTION CONF_ID <import_from_stmt>esphome.core TimePeriod<import_from_stmt>. XiaomiRTCGQ02LM<line_sep>DEPENDENCIES=["xiaomi_rtcgq02lm"]<line_sep>CONF_BUTTON="button"<line_sep>CONFIG_SCHEMA=cv.Schema({cv.GenerateID():cv.use_id(XiaomiRTCGQ02LM) cv.Optional(CONF_MOTION):binary_sensor.binary_sensor_schema(device_class=DEVICE_CLASS_MOTION).extend({cv.Optional(CONF_TIMEOUT default="5s"):cv.All(cv.positive_time_period_milliseconds cv.Range(max=TimePeriod(milliseconds=65535)) ) }) cv.Optional(CONF_LIGHT):binary_sensor.binary_sensor_schema(device_class=DEVICE_CLASS_LIGHT) cv.Optional(CONF_BUTTON):binary_sensor.binary_sensor_schema().extend({cv.Optional(CONF_TIMEOUT default="200ms"):cv.All(cv.positive_time_period_milliseconds cv.Range(max=TimePeriod(milliseconds=65535)) ) }) })<async_keyword><def_stmt>to_code config<block_start>parent=<await>cg.get_variable(config[CONF_ID])<if_stmt>CONF_MOTION<in>config<block_start>sens=<await>binary_sensor.new_binary_sensor(config[CONF_MOTION])<line_sep>cg.add(parent.set_motion(sens))<line_sep>cg.add(parent.set_motion_timeout(config[CONF_MOTION][CONF_TIMEOUT]))<block_end><if_stmt>CONF_LIGHT<in>config<block_start>sens=<await>binary_sensor.new_binary_sensor(config[CONF_LIGHT])<line_sep>cg.add(parent.set_light(sens))<block_end><if_stmt>CONF_BUTTON<in>config<block_start>sens=<await>binary_sensor.new_binary_sensor(config[CONF_BUTTON])<line_sep>cg.add(parent.set_button(sens))<line_sep>cg.add(parent.set_button_timeout(config[CONF_BUTTON][CONF_TIMEOUT]))<block_end><block_end>
<import_stmt>mastermind.uri<as>uri<def_stmt>test_is_template <block_start><assert_stmt>uri.is_template("http://localhost:8000")<is><false><assert_stmt>uri.is_template("http://localhost:8000/{a}/")<is><true><block_end><def_stmt>test_eq <block_start><assert_stmt>uri.eq("http://localhost:8000" "http://localhost:8000")<assert_stmt>uri.eq("https://localhost" "https://localhost")<assert_stmt><not>uri.eq("https://localhost" "http://localhost:443")<assert_stmt><not>uri.eq("https://localhost:9443" "http://localhost:9443")<assert_stmt><not>uri.eq("http://localhost/foo" "http://localhost/foo?q=1")<assert_stmt><not>uri.eq("http://localhost/{var}" "http://localhost/{var}")<assert_stmt>uri.eq("http://localhost/{var}" "http://localhost/value")<assert_stmt>uri.eq("http://localhost/{?q,p}" "http://localhost/?p=1")<block_end><def_stmt>test_expand_template1 <block_start>expected="http://example.org/value"<assert_stmt>uri.expand_template("http://example.org/{var}" "http://example.org/value")<eq>expected<block_end><def_stmt>test_expand_template2 <block_start>expected="http://example.org/value?q=1"<assert_stmt>uri.expand_template("http://example.org/{var}{?q}" "http://example.org/value?q=1")<eq>expected<block_end><def_stmt>test_expand_template3 <block_start>expected="http://example.org/?q=1"<assert_stmt>uri.expand_template("http://example.org/{?q,p}" "http://example.org?q=1")<eq>expected<block_end><def_stmt>test_query_pairs <block_start><assert_stmt>uri.query_pairs("")<eq>[]<assert_stmt>uri.query_pairs("q=1")<eq>[("q" "1")]<assert_stmt>uri.query_pairs("q=1&p=2")<eq>[("q" "1") ("p" "2")]<block_end><def_stmt>test_path_segments <block_start><assert_stmt>uri.path_segments("")<eq>[]<assert_stmt>uri.path_segments("/")<eq>[]<assert_stmt>uri.path_segments("/foo")<eq>["foo"]<assert_stmt>uri.path_segments("/foo/bar")<eq>["foo" "bar"]<assert_stmt>uri.path_segments("/foo/bar/baz")<eq>["foo" "bar" "baz"]<block_end>
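# For orientation, a naive sketch of the contract is_template() appears to
# test for; this is an assumption about the behavior, not mastermind.uri's
# actual implementation:

def is_template_sketch(uri_string):
    # an RFC 6570 template contains at least one {...} expression
    return '{' in uri_string and '}' in uri_string

assert is_template_sketch('http://localhost:8000/{a}/')
assert not is_template_sketch('http://localhost:8000')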
<import_stmt>unittest<import_from_stmt>asyncio sleep <import_from_stmt>unittest.mock AsyncMock call <import_from_stmt>minos.networks BrokerMessageV1 BrokerMessageV1Payload BrokerPublisher InMemoryBrokerPublisher InMemoryBrokerPublisherQueue QueuedBrokerPublisher <class_stmt>TestQueuedBrokerPublisher(unittest.IsolatedAsyncioTestCase)<block_start><def_stmt>setUp self<arrow><none><block_start>self.impl=InMemoryBrokerPublisher()<line_sep>self.queue=InMemoryBrokerPublisherQueue()<block_end><def_stmt>test_is_subclass self<block_start>self.assertTrue(issubclass(QueuedBrokerPublisher BrokerPublisher))<block_end><def_stmt>test_impl self<block_start>publisher=QueuedBrokerPublisher(self.impl self.queue)<line_sep>self.assertEqual(self.impl publisher.impl)<block_end><def_stmt>test_queue self<block_start>publisher=QueuedBrokerPublisher(self.impl self.queue)<line_sep>self.assertEqual(self.queue publisher.queue)<block_end><async_keyword><def_stmt>test_setup_destroy self<block_start>impl_setup_mock=AsyncMock()<line_sep>impl_destroy_mock=AsyncMock()<line_sep>queue_setup_mock=AsyncMock()<line_sep>queue_destroy_mock=AsyncMock()<line_sep>self.impl.setup=impl_setup_mock<line_sep>self.impl.destroy=impl_destroy_mock<line_sep>self.queue.setup=queue_setup_mock<line_sep>self.queue.destroy=queue_destroy_mock<async_keyword><with_stmt>QueuedBrokerPublisher(self.impl self.queue)<block_start>self.assertEqual(1 impl_setup_mock.call_count)<line_sep>self.assertEqual(0 impl_destroy_mock.call_count)<line_sep>self.assertEqual(1 queue_setup_mock.call_count)<line_sep>self.assertEqual(0 queue_destroy_mock.call_count)<line_sep>impl_setup_mock.reset_mock()<line_sep>impl_destroy_mock.reset_mock()<line_sep>queue_setup_mock.reset_mock()<line_sep>queue_destroy_mock.reset_mock()<block_end>self.assertEqual(0 impl_setup_mock.call_count)<line_sep>self.assertEqual(1 impl_destroy_mock.call_count)<line_sep>self.assertEqual(0 queue_setup_mock.call_count)<line_sep>self.assertEqual(1 queue_destroy_mock.call_count)<block_end><async_keyword><def_stmt>test_send self<block_start>queue_enqueue_mock=AsyncMock()<line_sep>self.queue.enqueue=queue_enqueue_mock<line_sep>publisher=QueuedBrokerPublisher(self.impl self.queue)<line_sep>message=BrokerMessageV1("foo" BrokerMessageV1Payload("bar"))<line_sep><await>publisher.send(message)<line_sep>self.assertEqual([call(message)] queue_enqueue_mock.call_args_list)<block_end><async_keyword><def_stmt>test_run self<block_start>messages=[BrokerMessageV1("foo" BrokerMessageV1Payload("bar")) BrokerMessageV1("bar" BrokerMessageV1Payload("foo")) ]<line_sep>impl_send_mock=AsyncMock()<line_sep>self.impl.send=impl_send_mock<async_keyword><with_stmt>QueuedBrokerPublisher(self.impl self.queue)<as>publisher<block_start><await>publisher.send(messages[0])<line_sep><await>publisher.send(messages[1])<line_sep><await>sleep(0.5)<block_end># To give time to consume the message self.assertEqual([call(messages[0]) call(messages[1])] impl_send_mock.call_args_list)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" T5Tokenizer """<import_from_stmt>transformers BertTokenizer<class_stmt>T5Tokenizer()<block_start><def_stmt>__init__ self extra_id_num=118<block_start>self.extra_id_num=extra_id_num<block_end>@classmethod<def_stmt>from_pretrained cls vocab_path<block_start>cls.extra_id_num=118<line_sep>cls.T5_special_tokens=['[BOS]' '[EOS]']<for_stmt>i range(cls.extra_id_num)<block_start>cls.T5_special_tokens.append(f'<extra_id_{str(i)}>')<block_end>tokenizer=BertTokenizer.from_pretrained(vocab_path additional_special_tokens=cls.T5_special_tokens)<line_sep><return>tokenizer<block_end><block_end>
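# Usage sketch ('/path/to/vocab.txt' is a placeholder): the wrapper returns a
# plain BertTokenizer that additionally registers [BOS]/[EOS] and the 118
# <extra_id_N> sentinels used for T5-style span corruption, so each sentinel
# is kept as a single token:

tokenizer = T5Tokenizer.from_pretrained('/path/to/vocab.txt')
ids = tokenizer.encode('<extra_id_0> 你好')
print(tokenizer.convert_ids_to_tokens(ids))  # '<extra_id_0>' stays one token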
<import_from_stmt>setuptools setup<import_stmt>fastentrypoints<line_sep>setup(name='dummypkg' version='0.0.0' py_modules=['dummy'] description='dummy package for the test' entry_points={'console_scripts':['hello=dummy:main']} )<line_sep>
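# Why fastentrypoints is imported above: it monkey-patches setuptools so that
# the installed `hello` script imports dummy:main directly instead of
# resolving it through pkg_resources, which noticeably speeds up CLI start-up.
# The generated script is roughly equivalent to this sketch (illustrative;
# details may differ by version):

import re
import sys

from dummy import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())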
<def_stmt>rotateMatrixby90 ipMat size# note: despite the name, this computes the transpose (opMat[j][i] = ipMat[i][j]);
# a true 90-degree rotation is this transpose followed by reverseMatrix() below
<block_start>opMat=[[0<for>i range(size)]<for>j range(size)]<for_stmt>i range(size)<block_start><for_stmt>j range(size)<block_start>opMat[j][i]=ipMat[i][j]<block_end><block_end><return>opMat<block_end><def_stmt>reverseMatrix ipMat size# flips the matrix vertically (reverses the order of the rows)
<block_start>opMat=[[0<for>i range(size)]<for>j range(size)]<for_stmt>i range(size)<block_start><for_stmt>j range(size)<block_start>opMat[abs(i-(size-1))][j]=ipMat[i][j]<block_end><block_end><return>opMat<block_end><def_stmt>rotateMatrixby180 ipMat size# two transpose+flip passes, i.e. two 90-degree rotations
<block_start>mat_1=rotateMatrixby90(ipMat size)<line_sep>mat_2=reverseMatrix(mat_1 len(mat_1))<line_sep>mat_3=rotateMatrixby90(mat_2 len(mat_2))<line_sep>mat_4=reverseMatrix(mat_3 len(mat_3))<line_sep><return>mat_4<block_end><def_stmt>printMatrix ipMat size<block_start><for_stmt>i range(size)<block_start><for_stmt>j range(size)<block_start>print(ipMat[i][j] end=" ")<block_end>print('\n')<block_end><block_end>matA=[[1 2 3 4] [5 6 7 8] [9 10 11 12] [13 14 15 16]]<line_sep>print("Original-Matrix"+'\n')<line_sep>printMatrix(matA len(matA))<line_sep>print("Transposed-Matrix"+'\n')<line_sep>rotatedMat=rotateMatrixby90(matA len(matA))<line_sep>printMatrix(rotatedMat len(rotatedMat))<line_sep># matB is the transpose of matA; reversing its rows completes a 90-degree rotation
matB=[[1 5 9 13] [2 6 10 14] [3 7 11 15] [4 8 12 16]]<line_sep>reverseMat=reverseMatrix(matB len(matB))<line_sep>print("Reverse-Matrix"+'\n')<line_sep>printMatrix(reverseMat len(reverseMat))<line_sep>print("Rotated-180-Matrix"+'\n')<line_sep>rotatedMat180=rotateMatrixby180(matA len(matA))<line_sep>printMatrix(rotatedMat180 len(rotatedMat180))<line_sep>
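# A cross-check with NumPy (an added sketch, not part of the original script):
# the transpose composed with the row reversal equals numpy.rot90, and
# applying rot90 twice matches the 180-degree result computed above.

import numpy as np

a = np.array(matA)
rot90 = reverseMatrix(rotateMatrixby90(matA, len(matA)), len(matA))
assert (np.rot90(a) == np.array(rot90)).all()
assert (np.rot90(a, 2) == np.array(rotatedMat180)).all()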
<import_stmt>factory<import_from_stmt>foundation.value_objects.factories get_dollars<import_from_stmt>auctions.domain.entities Auction<class_stmt>AuctionFactory(factory.Factory)<block_start><class_stmt>Meta<block_start>model=Auction<block_end>id=factory.Sequence(<lambda>n:n)<line_sep>bids=factory.List([])<line_sep>title=factory.Faker("name")<line_sep>starting_price=get_dollars("10.00")<line_sep>ends_at=factory.Faker("future_datetime" end_date="+7d")<line_sep>ended=<false><block_end>
<import_from_stmt>click.testing CliRunner<import_from_stmt>popper __version__<import_from_stmt>popper _version_file<import_from_stmt>popper.commands cmd_version<import_from_stmt>.test_common PopperTest<class_stmt>TestCommandVersion(PopperTest)<block_start><def_stmt>test_version self<block_start>self.assertIsNot("0.0.0" __version__)<with_stmt>self.assertLogs("popper")<as>test<block_start>result=CliRunner().invoke(cmd_version.cli)<line_sep>self.assertTrue(__version__<in>test.output[0])<line_sep>self.assertEqual(0 result.exit_code)<block_end><with_stmt>open(_version_file)<as>f<block_start>self.assertEqual(f"__popper_version__ = '{__version__}'\n" f.read())<block_end><block_end><block_end>
# Copyright (c) 2021, <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>numpy<as>np<import_stmt>cudf<import_stmt>cugraph<import_from_stmt>cugraph.generators rmat<def_stmt>generate_edgelist scale edgefactor seed=<none> unweighted=<false> <block_start>""" Returns a cudf DataFrame created using the R-MAT graph generator. The resulting graph is weighted with random values of a uniform distribution from the interval [0, 1). scale is used to determine the number of vertices to be generated (num_verts = 2^scale), which is also used to determine the data type for the vertex ID values in the DataFrame. edgefactor determines the number of edges (num_edges = num_verts * edgefactor). seed, if specified, will be used as the seed to the RNG. unweighted determines if the resulting edgelist will have randomly-generated weights ranging in value between [0, 1). If True, an edgelist with only 2 columns is returned. """<line_sep>df=rmat(scale (2<power>scale)<times>edgefactor 0.1 0.2 0.3 seed<or>42 clip_and_flip=<false> scramble_vertex_ids=<true> create_using=<none> # return edgelist instead of Graph instance
mg=<false>)<if_stmt><not>unweighted<block_start>rng=np.random.default_rng(seed)<line_sep>df["weight"]=rng.random(size=len(df))<block_end><return>df<block_end><def_stmt>read_csv input_csv_file scale<block_start>""" Returns a cudf DataFrame from reading input_csv_file. All input CSV files should be weighted with random values of a uniform distribution from the interval [0, 1) in order to best simulate the output of a Graph500-compliant graph generator. scale is used to determine the data type for the vertex ID values in the DataFrame (num_verts = 2^scale). """<line_sep>vertex_t="int32"<if>scale<le>32<else>"int64"<line_sep>dtypes=[vertex_t vertex_t "float32"]<line_sep>names=["src" "dst" "weight"] <line_sep>chunksize=cugraph.dask.get_chunksize(input_csv_file)<line_sep><return>cudf.read_csv(input_csv_file chunksize=chunksize delimiter=" " #names=names,
dtype=dtypes header=<none> )<block_end>################################################################################
# Benchmarked functions
#
# The "benchmark_name" attr is used by the benchmark infra for reporting and is
# set to assign more meaningful names to be displayed in reports.
<def_stmt>construct_graph dataframe symmetric=<false><block_start>""" dataframe contains weighted and undirected edges with self loops. Multiple edges will likely be present as well. The returned Graph object must be symmetrized and have self loops removed. 
"""<if_stmt>symmetric<block_start>G=cugraph.Graph()<block_end><else_stmt><block_start>G=cugraph.DiGraph()<block_end><if_stmt>len(dataframe.columns)<g>2<block_start>G.from_cudf_edgelist(dataframe source="src" destination="dst" edge_attr="weight")<line_sep>#G.from_cudf_edgelist( # dataframe, source="0", destination="1", edge_attr="2") <block_end><else_stmt><block_start>G.from_cudf_edgelist(dataframe source="src" destination="dst")<line_sep>#G.from_cudf_edgelist( # dataframe, source="0", destination="1") <block_end><return>G<block_end>construct_graph.benchmark_name="from_cudf_edgelist"<def_stmt>bfs G start<block_start><return>cugraph.bfs(G start=start)<block_end><def_stmt>sssp G start<block_start><return>cugraph.sssp(G source=start)<block_end><def_stmt>wcc G<block_start><return>cugraph.weakly_connected_components(G)<block_end><def_stmt>louvain G<block_start><return>cugraph.louvain(G)<block_end><def_stmt>pagerank G<block_start><return>cugraph.pagerank(G)<block_end><def_stmt>katz G alpha=<none><block_start><return>cugraph.katz_centrality(G alpha)<block_end>################################################################################ # Session-wide setup and teardown <def_stmt>setup *args **kwargs<block_start><return>tuple()<block_end><def_stmt>teardown *args **kwargs<block_start><pass><block_end>
<import_stmt>unittest<import_from_stmt>ds2.tree Tree<class_stmt>TestTree(unittest.TestCase)<block_start><def_stmt>testinit self<block_start>Tree(['root'])<line_sep>Tree([1 [2 [3] [4]] [5 [6] [7] [8]]])<block_end><def_stmt>teststr self<block_start>self.assertEqual(str(Tree([1 [2] [3]])) "1\n 2\n 3")<line_sep>self.assertEqual(str(Tree([1 [2 [3]]])) "1\n 2\n 3")<block_end><def_stmt>testcontains self<block_start>T=Tree([1 [2 [3]]])<line_sep>self.assertTrue(1<in>T)<line_sep>self.assertTrue(2<in>T)<line_sep>self.assertTrue(3<in>T)<line_sep>self.assertFalse(4<in>T)<block_end><def_stmt>testeq self<block_start>A=Tree([1 [2] [3]])<line_sep>B=Tree([1 [2] [3]])<line_sep>C=Tree([1 [3] [2]])<line_sep>D=Tree([1 [2 [3]]])<line_sep>E=Tree([1 [2 [3]]])<line_sep>self.assertTrue(A<eq>B)<line_sep>self.assertTrue(D<eq>E)<line_sep>self.assertFalse(A<eq>C)<line_sep>self.assertFalse(B<eq>C)<line_sep>self.assertFalse(A<eq>D)<block_end><def_stmt>testheight self<block_start>A=Tree([1 [2 [3]]])<line_sep>B=Tree([1 [2] [3] [4]])<line_sep>C=Tree([1 [1 [1 [1 [1 [1]]]]]])<line_sep>self.assertEqual(A.height() 2)<line_sep>self.assertEqual(B.height() 1)<line_sep>self.assertEqual(C.height() 5)<line_sep>self.assertEqual(Tree([1]).height() 0)<block_end><def_stmt>testpreorder self<block_start>A=Tree([1 [2] [3]])<line_sep>B=Tree([1 [3] [2]])<line_sep>C=Tree([1 [2 [3]]])<line_sep>self.assertEqual(list(A.preorder()) [1 2 3])<line_sep>self.assertEqual(list(B.preorder()) [1 3 2])<line_sep>self.assertEqual(list(C.preorder()) [1 2 3])<block_end><def_stmt>testiter self<block_start>A=Tree([4 [5] [6]])<line_sep>B=Tree([1 [3] [2]])<line_sep>C=Tree([1 [2 [3]]])<line_sep>self.assertEqual(list(A) [4 5 6])<line_sep>self.assertEqual(list(B) [1 3 2])<line_sep>self.assertEqual(list(C) [1 2 3])<block_end><def_stmt>testpostorder self<block_start>A=Tree([1 [2] [3]])<line_sep>B=Tree([1 [3] [2]])<line_sep>C=Tree([1 [2 [3]]])<line_sep>self.assertEqual(list(A.postorder()) [2 3 1])<line_sep>self.assertEqual(list(B.postorder()) [3 2 1])<line_sep>self.assertEqual(list(C.postorder()) [3 2 1])<block_end><def_stmt>testlayerorder self<block_start>A=Tree([1 [2] [3]])<line_sep>B=Tree([1 [2 [3]] [4]])<line_sep>C=Tree([1 [2 [3] [4]]])<line_sep>self.assertEqual(list(A.layerorder()) [1 2 3])<line_sep>self.assertEqual(list(B.layerorder()) [1 2 4 3])<line_sep>self.assertEqual(list(C.layerorder()) [1 2 3 4])<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. <class_stmt>ExampleData(object)<block_start><def_stmt>__init__ self question:str=<none> is_multi_select:bool=<false> option1:str=<none> option2:str=<none> option3:str=<none> <block_start>self.question=question<line_sep>self.is_multi_select=is_multi_select<line_sep>self.option1=option1<line_sep>self.option2=option2<line_sep>self.option3=option3<block_end><block_end>
""" Example slider widget. """<import_stmt>asyncio<import_from_stmt>nurses_2.app App<import_from_stmt>nurses_2.colors BLUE GREEN BLACK RED ColorPair<import_from_stmt>nurses_2.widgets.text_widget TextWidget<import_from_stmt>nurses_2.widgets.slider Slider<line_sep>GREEN_ON_BLACK=ColorPair.from_colors(GREEN BLACK)<class_stmt>MyApp(App)<block_start><async_keyword><def_stmt>on_start self<block_start>display=TextWidget(size=(2 30))<line_sep>display.add_text("Slider 1 Value:" row=0)<line_sep>display.add_text("Slider 2 Value:" row=1)<line_sep>slider_1=Slider(width=20 pos=(2 0) min=0 max=100 handle_color=BLUE callback=<lambda>value:display.add_text(f"{round(value 3):<10}" row=0 column=16) fill_color=RED default_color_pair=GREEN_ON_BLACK )<line_sep>slider_2=Slider(width=15 pos=(3 0) min=-20 max=50 handle_color=BLUE callback=<lambda>value:display.add_text(f"{round(value 3):<10}" row=1 column=16) fill_color=RED default_color_pair=GREEN_ON_BLACK )<line_sep>self.add_widgets(display slider_1 slider_2)<block_end><block_end>MyApp().run()<line_sep>
<def_stmt>selection_sort nums# This value of i corresponds to how many values were sorted <block_start><for_stmt>i range(len(nums))# We assume that the first item of the unsorted segment is the smallest <block_start>lowest_value_index=i<line_sep># This loop iterates over the unsorted items <for_stmt>j range(i+1 len(nums))<block_start><if_stmt>nums[j]<l>nums[lowest_value_index]<block_start>lowest_value_index=j<block_end><block_end># Swap values of the lowest unsorted element with the first unsorted # element nums[i],nums[lowest_value_index]=nums[lowest_value_index] nums[i]<block_end><block_end># Verify it works random_list_of_nums=[12 8 3 20 11]<line_sep>selection_sort(random_list_of_nums)<line_sep>print(random_list_of_nums)<line_sep>
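# Selection sort performs O(n^2) comparisons but only O(n) swaps, which is
# the point of tracking lowest_value_index instead of swapping eagerly.
# A quick property check against Python's built-in sort (illustrative only):

import random

for _ in range(100):
    nums = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    expected = sorted(nums)
    selection_sort(nums)
    assert nums == expected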
# Copyright 2009-2017 <NAME>. # This program is distributed under the MIT license. <import_stmt>operator<import_from_stmt>python_toolbox cute_testing<import_from_stmt>python_toolbox logic_tools<import_from_stmt>python_toolbox emitting<import_from_stmt>python_toolbox.nifty_collections OrderedSet FrozenOrderedSet EmittingOrderedSet <class_stmt>BaseOrderedSetTestCase(cute_testing.TestCase)<block_start>__test__=<false><def_stmt>test_operations self<block_start>ordered_set=self.ordered_set_type([5 61 2 7 2])<assert_stmt>type(ordered_set|ordered_set)<eq>type(ordered_set&ordered_set)<eq>type(ordered_set)<block_end><def_stmt>test_bool self<block_start><assert_stmt>bool(self.ordered_set_type({}))<is><false><assert_stmt>bool(self.ordered_set_type({0}))<is><true><assert_stmt>bool(self.ordered_set_type(range(5)))<is><true><block_end><block_end><class_stmt>BaseMutableOrderedSetTestCase(BaseOrderedSetTestCase)<block_start>__test__=<false><def_stmt>test_sort self<block_start>ordered_set=self.ordered_set_type([5 61 2 7 2])<assert_stmt>ordered_set<ne>{5 61 2 7}<line_sep>ordered_set.move_to_end(61)<assert_stmt>list(ordered_set)<eq>[5 2 7 61]<line_sep>ordered_set.sort()<assert_stmt>list(ordered_set)<eq>[2 5 7 61]<line_sep>ordered_set.sort(key=<lambda>x:-x reverse=<true>)<assert_stmt>list(ordered_set)<eq>[2 5 7 61]<block_end><def_stmt>test_mutable self<block_start>ordered_set=self.ordered_set_type(range(4))<assert_stmt>list(ordered_set)<eq>list(range(4))<assert_stmt>len(ordered_set)<eq>4<assert_stmt>1<in>ordered_set<assert_stmt>3<in>ordered_set<assert_stmt>7<not><in>ordered_set<line_sep>ordered_set.add(8)<assert_stmt>list(ordered_set)[-1]<eq>8<line_sep>ordered_set.discard(2)<assert_stmt>2<not><in>ordered_set<assert_stmt>list(reversed(ordered_set))<eq>[8 3 1 0]<assert_stmt>ordered_set.pop()<eq>8<assert_stmt>ordered_set.pop(last=<false>)<eq>0<line_sep>ordered_set.add(7 last=<false>)<assert_stmt>tuple(ordered_set)<eq>(7 1 3)<with_stmt>cute_testing.RaiseAssertor(KeyError)<block_start>ordered_set.remove('meow')<block_end>ordered_set.discard('meow')<line_sep>ordered_set.discard('meow')<line_sep>ordered_set.discard('meow')<assert_stmt>ordered_set|ordered_set<eq>ordered_set<assert_stmt>ordered_set&ordered_set<eq>ordered_set<block_end><block_end><class_stmt>OrderedSetTestCase(BaseMutableOrderedSetTestCase)<block_start>__test__=<true><line_sep>ordered_set_type=OrderedSet<block_end><class_stmt>FrozenOrderedSetTestCase(BaseOrderedSetTestCase)<block_start>__test__=<true><line_sep>ordered_set_type=FrozenOrderedSet<def_stmt>test_frozen 
self<block_start>frozen_ordered_set=self.ordered_set_type(range(4))<assert_stmt>list(frozen_ordered_set)<eq>list(range(4))<assert_stmt>len(frozen_ordered_set)<eq>4<assert_stmt>1<in>frozen_ordered_set<assert_stmt>3<in>frozen_ordered_set<assert_stmt>7<not><in>frozen_ordered_set<with_stmt>cute_testing.RaiseAssertor(AttributeError)<block_start>frozen_ordered_set.add(8)<block_end><with_stmt>cute_testing.RaiseAssertor(AttributeError)<block_start>frozen_ordered_set.discard(2)<block_end><with_stmt>cute_testing.RaiseAssertor(AttributeError)<block_start>frozen_ordered_set.remove(2)<block_end><with_stmt>cute_testing.RaiseAssertor(AttributeError)<block_start>frozen_ordered_set.clear()<block_end><with_stmt>cute_testing.RaiseAssertor(AttributeError)<block_start>frozen_ordered_set.sort()<block_end><with_stmt>cute_testing.RaiseAssertor(AttributeError)<block_start>frozen_ordered_set.move_to_end(2)<block_end><with_stmt>cute_testing.RaiseAssertor(AttributeError)<block_start>frozen_ordered_set.pop(2)<block_end><assert_stmt>list(frozen_ordered_set)<eq>list(range(4))<block_end><def_stmt>test_hashable self<block_start>d={FrozenOrderedSet(range(1)):1 FrozenOrderedSet(range(2)):2 FrozenOrderedSet(range(3)):3 }<assert_stmt>len(d)<eq>3<assert_stmt>set(d.values())<eq>{1 2 3}<assert_stmt>d[FrozenOrderedSet(range(2))]<eq>2<line_sep>d[FrozenOrderedSet(range(2))]=20<assert_stmt>set(d.values())<eq>{1 20 3}<block_end><block_end><class_stmt>EmittingOrderedSetTestCase(BaseMutableOrderedSetTestCase)<block_start>__test__=<true><line_sep>ordered_set_type=EmittingOrderedSet<def_stmt>test_emitting self<block_start>times_emitted=[0]<def_stmt>increment_times_emitted <block_start>times_emitted[0]<augadd>1<block_end>emitter=emitting.Emitter(outputs=increment_times_emitted)<line_sep>emitting_ordered_set=self.ordered_set_type(range(7) emitter=emitter)<assert_stmt>times_emitted<eq>[0]<line_sep>emitting_ordered_set.add(7)<assert_stmt>times_emitted<eq>[1]<line_sep>emitting_ordered_set.add(7)<assert_stmt>times_emitted<eq>[1]<line_sep>emitting_ordered_set.discard(17)<assert_stmt>times_emitted<eq>[1]<assert_stmt>emitting_ordered_set.get_without_emitter()<eq>OrderedSet(range(8))<line_sep>emitting_ordered_set<augor>(8 9 10)<assert_stmt>times_emitted<eq>[4]<line_sep>emitting_ordered_set<augor>(8 9 10)<assert_stmt>times_emitted<eq>[4]<assert_stmt>emitting_ordered_set.get_without_emitter()<eq>OrderedSet(range(11))<line_sep>emitting_ordered_set.move_to_end(4)<assert_stmt>times_emitted<eq>[5]<assert_stmt>tuple(emitting_ordered_set)<eq>(0 1 2 3 5 6 7 8 9 10 4)<block_end><block_end><def_stmt>test_operations_on_different_types <block_start>x1=OrderedSet(range(0 4))|FrozenOrderedSet(range(2 6))<line_sep>x2=OrderedSet(range(0 4))&FrozenOrderedSet(range(2 6))<line_sep>x3=FrozenOrderedSet(range(0 4))|OrderedSet(range(2 6))<line_sep>x4=FrozenOrderedSet(range(0 4))&OrderedSet(range(2 6))<assert_stmt>type(x1)<eq>OrderedSet<assert_stmt>type(x2)<eq>OrderedSet<assert_stmt>type(x3)<eq>FrozenOrderedSet<assert_stmt>type(x4)<eq>FrozenOrderedSet<assert_stmt>x1<eq>OrderedSet(range(0 6))<assert_stmt>x2<eq>OrderedSet(range(2 4))<assert_stmt>x3<eq>FrozenOrderedSet(range(0 6))<assert_stmt>x4<eq>FrozenOrderedSet(range(2 4))<assert_stmt>logic_tools.all_equivalent((x1 x2 x3 x4) relation=operator.ne)<block_end>
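# Background sketch (an assumption about the general idea, not python_toolbox's
# actual implementation): on modern Python an insertion-ordered set can be
# built on a plain dict, which keeps operations like move_to_end cheap.

class MiniOrderedSet:
    def __init__(self, iterable=()):
        self._items = dict.fromkeys(iterable)  # dicts preserve insertion order

    def add(self, item):
        self._items[item] = None

    def discard(self, item):
        self._items.pop(item, None)

    def move_to_end(self, item):
        del self._items[item]
        self._items[item] = None

    def __iter__(self):
        return iter(self._items)

s = MiniOrderedSet([5, 61, 2, 7, 2])
s.move_to_end(61)
assert list(s) == [5, 2, 7, 61]  # matches the expectation in test_sort above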
<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>src.predictor Predictor<import_from_stmt>src.transforms get_transforms<import_from_stmt>src.utils get_best_model_path gmean_preds_blend<import_from_stmt>src.datasets get_test_data<import_from_stmt>src config<line_sep>EXPERIMENTS=['auxiliary_009' 'auxiliary_010' 'auxiliary_011']<line_sep>DEVICE='cuda'<line_sep>CROP_SIZE=256<line_sep>BATCH_SIZE=16<def_stmt>pred_test predictor test_data<block_start>fname_lst,images_lst=test_data<line_sep>pred_lst=[]<for_stmt>image images_lst<block_start>pred=predictor.predict(image)<line_sep>pred=pred.mean(axis=0)<line_sep>pred_lst.append(pred)<block_end>preds=np.stack(pred_lst axis=0)<line_sep>pred_df=pd.DataFrame(data=preds index=fname_lst columns=config.classes)<line_sep>pred_df.index.name='fname'<line_sep><return>pred_df<block_end><def_stmt>experiment_pred experiment_dir test_data<block_start>print(f"Start predict: {experiment_dir}")<line_sep>transforms=get_transforms(<false> CROP_SIZE)<line_sep>pred_df_lst=[]<for_stmt>fold config.folds<block_start>print("Predict fold" fold)<line_sep>fold_dir=experiment_dir/f'fold_{fold}'<line_sep>model_path=get_best_model_path(fold_dir)<line_sep>print("Model path" model_path)<line_sep>predictor=Predictor(model_path transforms BATCH_SIZE (config.audio.n_mels CROP_SIZE) (config.audio.n_mels CROP_SIZE<floordiv>4) device=DEVICE)<line_sep>pred_df=pred_test(predictor test_data)<line_sep>pred_df_lst.append(pred_df)<block_end>pred_df=gmean_preds_blend(pred_df_lst)<line_sep><return>pred_df<block_end><if_stmt>__name__<eq>"__main__"<block_start>print("Experiments" EXPERIMENTS)<line_sep>test_data=get_test_data()<line_sep>exp_pred_df_lst=[]<for_stmt>experiment EXPERIMENTS<block_start>experiment_dir=config.experiments_dir/experiment<line_sep>exp_pred_df=experiment_pred(experiment_dir test_data)<line_sep>exp_pred_df_lst.append(exp_pred_df)<block_end>blend_pred_df=gmean_preds_blend(exp_pred_df_lst)<line_sep>blend_pred_df.to_csv('submission.csv')<block_end>
<import_stmt>numpy<as>np<import_from_stmt>sklearn model_selection<import_from_stmt>sklearn datasets<import_from_stmt>sklearn.ensemble GradientBoostingRegressor<import_from_stmt>sklearn.metrics mean_squared_error<import_stmt>matplotlib.pyplot<as>plt<import_stmt>seaborn<as>sns<class_stmt>Friedman1Test<block_start>"""This class encapsulates the Friedman1 regression test for feature selection """<line_sep>VALIDATION_SIZE=0.20<line_sep>NOISE=1.0<def_stmt>__init__ self numFeatures numSamples randomSeed<block_start>""" :param numFeatures: total number of features to be used (at least 5) :param numSamples: number of samples in dataset :param randomSeed: random seed value used for reproducible results """<line_sep>self.numFeatures=numFeatures<line_sep>self.numSamples=numSamples<line_sep>self.randomSeed=randomSeed<line_sep># generate test data: self.X,self.y=datasets.make_friedman1(n_samples=self.numSamples n_features=self.numFeatures noise=self.NOISE random_state=self.randomSeed)<line_sep># divide the data to a training set and a validation set: self.X_train,self.X_validation,self.y_train,self.y_validation=model_selection.train_test_split(self.X self.y test_size=self.VALIDATION_SIZE random_state=self.randomSeed)<line_sep>self.regressor=GradientBoostingRegressor(random_state=self.randomSeed)<block_end><def_stmt>__len__ self<block_start>""" :return: the total number of features """<line_sep><return>self.numFeatures<block_end><def_stmt>getMSE self zeroOneList<block_start>""" returns the mean squared error of the regressor, calculated for the validation set, after training using the features selected by the zeroOneList :param zeroOneList: a list of binary values corresponding to the features in the dataset. A value of '1' represents selecting the corresponding feature, while a value of '0' means that the feature is dropped. :return: the mean squared error of the regressor when using the features selected by the zeroOneList """<line_sep># drop the columns of the training and validation sets that correspond to the # unselected features: zeroIndices=[i<for>i,n enumerate(zeroOneList)<if>n<eq>0]<line_sep>currentX_train=np.delete(self.X_train zeroIndices 1)<line_sep>currentX_validation=np.delete(self.X_validation zeroIndices 1)<line_sep># train the regression model using the training set: self.regressor.fit(currentX_train self.y_train)<line_sep># calculate the regressor's output for the validation set: prediction=self.regressor.predict(currentX_validation)<line_sep># return the mean squared error of prediction vs actual data: <return>mean_squared_error(self.y_validation prediction)<block_end><block_end># testing the class: <def_stmt>main # create a test instance: <block_start>test=Friedman1Test(numFeatures=15 numSamples=60 randomSeed=42)<line_sep>scores=[]<line_sep># calculate MSE for 'n' first features: <for_stmt>n range(1 len(test)+1)<block_start>nFirstFeatures=[1]<times>n+[0]<times>(len(test)-n)<line_sep>score=test.getMSE(nFirstFeatures)<line_sep>print("%d first features: score = %f"%(n score))<line_sep>scores.append(score)<block_end># plot graph: sns.set_style("whitegrid")<line_sep>plt.plot([i+1<for>i range(len(test))] scores color='red')<line_sep>plt.xticks(np.arange(1 len(test)+1 1.0))<line_sep>plt.xlabel('n First Features')<line_sep>plt.ylabel('MSE')<line_sep>plt.title('MSE over Features Selected')<line_sep>plt.show()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>pandas<as>pd<import_stmt>numpy<as>numpy<import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers Dense Dropout Activation Flatten Reshape<import_from_stmt>keras.layers Conv1D MaxPooling1D<import_from_stmt>keras.utils np_utils<import_from_stmt>keras.layers LSTM LeakyReLU CuDNNLSTM<import_from_stmt>keras.callbacks CSVLogger ModelCheckpoint<import_stmt>h5py<import_stmt>os<import_stmt>tensorflow<as>tf<import_from_stmt>keras.backend.tensorflow_backend set_session<import_from_stmt>keras regularizers<line_sep>os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'<line_sep>os.environ['CUDA_VISIBLE_DEVICES']='1'<line_sep>os.environ['TF_CPP_MIN_LOG_LEVEL']='2'<line_sep>config=tf.ConfigProto()<line_sep>config.gpu_options.allow_growth=<true><line_sep>set_session(tf.Session(config=config))<with_stmt>h5py.File(''.join(['data/bitcoin2015to2017_close.h5']) 'r')<as>hf<block_start>datas=hf['inputs'].value<line_sep>labels=hf['outputs'].value<block_end>step_size=datas.shape[1]<line_sep>units=50<line_sep>second_units=30<line_sep>batch_size=8<line_sep>nb_features=datas.shape[2]<line_sep>epochs=50<line_sep>output_size=16<line_sep>reg=1<line_sep>output_file_name='bitcoin2015to2017_close_LSTM_1_tanh_leaky_areg_l1_'+str(reg)<line_sep>#split training validation training_size=int(0.8<times>datas.shape[0])<line_sep>training_datas=datas[:training_size :]<line_sep>training_labels=labels[:training_size : 0]<line_sep>validation_datas=datas[training_size: :]<line_sep>validation_labels=labels[training_size: : 0]<line_sep>#build model model=Sequential()<line_sep>model.add(CuDNNLSTM(units=units activity_regularizer=regularizers.l1(reg) input_shape=(step_size nb_features) return_sequences=<false>))<line_sep>model.add(Activation('tanh'))<line_sep>model.add(Dropout(0.2))<line_sep>model.add(Dense(output_size))<line_sep>model.add(LeakyReLU())<line_sep>model.compile(loss='mse' optimizer='adam')<line_sep>model.fit(training_datas training_labels batch_size=batch_size validation_data=(validation_datas validation_labels) epochs=epochs callbacks=[CSVLogger(output_file_name+'.csv' append=<true>)])<line_sep># model.fit(datas,labels) #model.save(output_file_name+'.h5')
# append a constant class label to every line of the annotation list ori_file='/home/unaguo/hanson/data/landmark/WFLW191104/train_data/300W_LP.txt'<line_sep>save_file='/home/unaguo/hanson/data/landmark/WFLW191104/train_data/300W_LP1.txt'<line_sep>label='0'<line_sep>ori_lines=[]<with_stmt>open(ori_file 'r')<as>f<block_start>ori_lines=f.readlines()<block_end><with_stmt>open(save_file 'w')<as>f<block_start><for_stmt>line ori_lines<block_start>line=line.strip()<line_sep>new_line='{} {}\n'.format(line label)<line_sep>f.write(new_line)<block_end><block_end>
# -*- coding: utf-8 -*- {'name':"wechat_mall" 'application':<true> 'summary':u""" 微信小程序商城管理后台""" 'description':u""" 微信小程序商城管理后台 """ 'author':"Gzp" 'website':"http://wechat.elfgzp.cn" # Categories can be used to filter modules in modules listing # Check https://github.com/odoo/odoo/blob/master/odoo/addons/base/module/module_data.xml # for the full list 'category':'Website' 'version':'0.1' # any module necessary for this one to work correctly 'depends':['base' 'mail' 'website'] # always loaded 'data':['security/wechat_mall_security.xml' 'security/ir.model.access.csv' 'views/parent_menus.xml' # logistics views 'views/logistics/wechat_mall_city_views.xml' 'views/logistics/wechat_mall_district_views.xml' 'views/logistics/wechat_mall_logistics_views.xml' 'views/logistics/wechat_mall_province_views.xml' 'views/logistics/wechat_mall_shipper_views.xml' 'views/logistics/menu_logistics.xml' # order views 'views/order/wechat_mall_order_views.xml' 'views/order/menu_order.xml' # product views 'views/product/wechat_mall_category_views.xml' 'views/product/wechat_mall_goods_views.xml' 'views/product/wechat_mall_subshop_views.xml' 'views/product/menu_product.xml' # setting views 'views/setting/wechat_mall_banner_views.xml' 'views/setting/wechat_mall_config_settings_views.xml' 'views/setting/wechat_mall_user_views.xml' 'views/setting/wechat_mall_address_views.xml' 'views/setting/menu_setting.xml' # other 'views/ir_attachment_view.xml' 'views/wechat_mall_modify_price_wizard_views.xml' 'views/wechat_mall_deliver_wizard_views.xml' 'views/webclient_templates.xml' 'data/order_num_sequence.xml' 'data/payment_num_sequence.xml' 'data/mail_template.xml' ] # only loaded in demonstration mode 'demo':['demo/demo.xml' ] }<line_sep>
<import_from_future_stmt> absolute_import<import_from_future_stmt> print_function<import_stmt>veriloggen<import_stmt>thread_to_thread_pool<line_sep>expected_verilog=""" module test; reg CLK; reg RST; blinkled uut ( .CLK(CLK), .RST(RST) ); initial begin CLK = 0; forever begin #5 CLK = !CLK; end end initial begin RST = 0; #100; RST = 1; #100; RST = 0; #10000; $finish; end endmodule module blinkled ( input CLK, input RST ); reg [8-1:0] _th_myfunc_a_0_start; reg [32-1:0] th_blink; localparam th_blink_init = 0; reg signed [32-1:0] _th_blink_times_0; reg signed [32-1:0] _th_blink_tid_1; reg [32-1:0] th_myfunc_a_0; localparam th_myfunc_a_0_init = 0; reg [32-1:0] th_myfunc_a_1; localparam th_myfunc_a_1_init = 0; reg [32-1:0] th_myfunc_a_2; localparam th_myfunc_a_2_init = 0; reg [32-1:0] th_myfunc_a_3; localparam th_myfunc_a_3_init = 0; reg [32-1:0] th_myfunc_b_0; localparam th_myfunc_b_0_init = 0; reg [32-1:0] th_myfunc_b_1; localparam th_myfunc_b_1_init = 0; reg [32-1:0] th_myfunc_b_2; localparam th_myfunc_b_2_init = 0; reg [32-1:0] th_myfunc_b_3; localparam th_myfunc_b_3_init = 0; reg _th_myfunc_a_0_called; reg signed [32-1:0] _th_myfunc_a_0_tid_2; reg signed [32-1:0] _th_myfunc_a_0_tid_3; reg signed [32-1:0] _th_myfunc_a_0_i_4; reg signed [32-1:0] _th_myfunc_a_0_tmp_5_6; reg _th_myfunc_a_1_called; reg signed [32-1:0] _th_myfunc_a_1_tid_7; reg signed [32-1:0] _th_myfunc_a_1_tid_8; reg signed [32-1:0] _th_myfunc_a_1_i_9; reg signed [32-1:0] _th_myfunc_a_1_tmp_10_11; reg _th_myfunc_a_2_called; reg signed [32-1:0] _th_myfunc_a_2_tid_12; reg signed [32-1:0] _th_myfunc_a_2_tid_13; reg signed [32-1:0] _th_myfunc_a_2_i_14; reg signed [32-1:0] _th_myfunc_a_2_tmp_15_16; reg _th_myfunc_a_3_called; reg signed [32-1:0] _th_myfunc_a_3_tid_17; reg signed [32-1:0] _th_myfunc_a_3_tid_18; reg signed [32-1:0] _th_myfunc_a_3_i_19; reg signed [32-1:0] _th_myfunc_a_3_tmp_20_21; reg _th_myfunc_b_0_called; reg signed [32-1:0] _th_myfunc_b_0_tid_22; reg signed [32-1:0] _th_myfunc_b_0_tid_23; reg signed [32-1:0] _th_myfunc_b_0_i_24; reg signed [32-1:0] _th_myfunc_b_0_tmp_25_26; reg _th_myfunc_b_1_called; reg signed [32-1:0] _th_myfunc_b_1_tid_27; reg signed [32-1:0] _th_myfunc_b_1_tid_28; reg signed [32-1:0] _th_myfunc_b_1_i_29; reg signed [32-1:0] _th_myfunc_b_1_tmp_30_31; reg _th_myfunc_b_2_called; reg signed [32-1:0] _th_myfunc_b_2_tid_32; reg signed [32-1:0] _th_myfunc_b_2_tid_33; reg signed [32-1:0] _th_myfunc_b_2_i_34; reg signed [32-1:0] _th_myfunc_b_2_tmp_35_36; reg _th_myfunc_b_3_called; reg signed [32-1:0] _th_myfunc_b_3_tid_37; reg signed [32-1:0] _th_myfunc_b_3_tid_38; reg signed [32-1:0] _th_myfunc_b_3_i_39; reg signed [32-1:0] _th_myfunc_b_3_tmp_40_41; reg signed [32-1:0] _th_blink_sum_42; localparam th_blink_1 = 1; localparam th_blink_2 = 2; localparam th_blink_3 = 3; localparam th_blink_4 = 4; localparam th_blink_5 = 5; localparam th_blink_6 = 6; localparam th_blink_7 = 7; localparam th_blink_8 = 8; localparam th_blink_9 = 9; localparam th_blink_10 = 10; localparam th_blink_11 = 11; localparam th_blink_12 = 12; localparam th_blink_13 = 13; localparam th_blink_14 = 14; always @(posedge CLK) begin if(RST) begin th_blink <= th_blink_init; _th_blink_times_0 <= 0; _th_blink_tid_1 <= 0; _th_myfunc_a_0_start[_th_blink_tid_1] <= (0 >> _th_blink_tid_1) & 1'd1; _th_blink_sum_42 <= 0; end else begin case(th_blink) th_blink_init: begin _th_blink_times_0 <= 20; th_blink <= th_blink_1; end th_blink_1: begin _th_blink_tid_1 <= 0; th_blink <= th_blink_2; end th_blink_2: begin if(_th_blink_tid_1 < 8) begin th_blink <= 
th_blink_3; end else begin th_blink <= th_blink_7; end end th_blink_3: begin _th_myfunc_a_0_start[_th_blink_tid_1] <= 1; th_blink <= th_blink_4; end th_blink_4: begin th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; end th_blink_5: begin _th_myfunc_a_0_start[_th_blink_tid_1] <= 0; th_blink <= th_blink_6; end th_blink_6: begin _th_blink_tid_1 <= _th_blink_tid_1 + 1; th_blink <= th_blink_2; end th_blink_7: begin _th_blink_sum_42 <= 0; th_blink <= th_blink_8; end th_blink_8: begin _th_blink_tid_1 <= 0; th_blink <= th_blink_9; end th_blink_9: begin if(_th_blink_tid_1 < 8) begin th_blink <= th_blink_10; end else begin th_blink <= th_blink_13; end end th_blink_10: begin if((_th_blink_tid_1 == 0)? th_myfunc_a_0 == 7 : (_th_blink_tid_1 == 1)? th_myfunc_a_1 == 7 : (_th_blink_tid_1 == 2)? th_myfunc_a_2 == 7 : (_th_blink_tid_1 == 3)? th_myfunc_a_3 == 7 : (_th_blink_tid_1 == 4)? th_myfunc_b_0 == 7 : (_th_blink_tid_1 == 5)? th_myfunc_b_1 == 7 : (_th_blink_tid_1 == 6)? th_myfunc_b_2 == 7 : (_th_blink_tid_1 == 7)? th_myfunc_b_3 == 7 : 0) begin th_blink <= th_blink_11; end end th_blink_11: begin _th_blink_sum_42 <= _th_blink_sum_42 + ((_th_blink_tid_1 == 0)? _th_myfunc_a_0_tmp_5_6 : (_th_blink_tid_1 == 1)? _th_myfunc_a_1_tmp_10_11 : (_th_blink_tid_1 == 2)? _th_myfunc_a_2_tmp_15_16 : (_th_blink_tid_1 == 3)? _th_myfunc_a_3_tmp_20_21 : (_th_blink_tid_1 == 4)? _th_myfunc_b_0_tmp_25_26 : (_th_blink_tid_1 == 5)? _th_myfunc_b_1_tmp_30_31 : (_th_blink_tid_1 == 6)? _th_myfunc_b_2_tmp_35_36 : (_th_blink_tid_1 == 7)? _th_myfunc_b_3_tmp_40_41 : 'hx); th_blink <= th_blink_12; end th_blink_12: begin _th_blink_tid_1 <= _th_blink_tid_1 + 1; th_blink <= th_blink_9; end th_blink_13: begin $display("sum = %d", _th_blink_sum_42); th_blink <= th_blink_14; end endcase end end localparam th_myfunc_a_0_1 = 1; localparam th_myfunc_a_0_2 = 2; localparam th_myfunc_a_0_3 = 3; localparam th_myfunc_a_0_4 = 4; localparam th_myfunc_a_0_5 = 5; localparam th_myfunc_a_0_6 = 6; localparam th_myfunc_a_0_7 = 7; always @(posedge CLK) begin if(RST) begin th_myfunc_a_0 <= th_myfunc_a_0_init; _th_myfunc_a_0_called <= 0; _th_myfunc_a_0_tid_2 <= 0; _th_myfunc_a_0_tid_3 <= 0; _th_myfunc_a_0_i_4 <= 0; _th_myfunc_a_0_tmp_5_6 <= 0; end else begin case(th_myfunc_a_0) th_myfunc_a_0_init: begin if(_th_myfunc_a_0_start[0] && (th_blink == 4)) begin _th_myfunc_a_0_called <= 1; end if(_th_myfunc_a_0_start[0] && (th_blink == 4)) begin _th_myfunc_a_0_tid_2 <= _th_blink_tid_1; end if((th_blink == 4) && _th_myfunc_a_0_start[0]) begin th_myfunc_a_0 <= th_myfunc_a_0_1; end end th_myfunc_a_0_1: begin _th_myfunc_a_0_tid_3 <= _th_myfunc_a_0_tid_2; th_myfunc_a_0 <= th_myfunc_a_0_2; end th_myfunc_a_0_2: begin $display("myfunc_a: tid = %d", _th_myfunc_a_0_tid_3); th_myfunc_a_0 <= th_myfunc_a_0_3; end th_myfunc_a_0_3: begin _th_myfunc_a_0_i_4 <= 0; th_myfunc_a_0 <= th_myfunc_a_0_4; end th_myfunc_a_0_4: begin if(_th_myfunc_a_0_i_4 < 30 - _th_myfunc_a_0_tid_3) begin th_myfunc_a_0 <= th_myfunc_a_0_5; end else begin th_myfunc_a_0 <= th_myfunc_a_0_6; end end th_myfunc_a_0_5: begin _th_myfunc_a_0_i_4 <= _th_myfunc_a_0_i_4 + 1; th_myfunc_a_0 <= th_myfunc_a_0_4; end th_myfunc_a_0_6: begin _th_myfunc_a_0_tmp_5_6 <= _th_myfunc_a_0_tid_3 + 100; th_myfunc_a_0 <= th_myfunc_a_0_7; end endcase end end localparam th_myfunc_a_1_1 = 1; localparam th_myfunc_a_1_2 = 2; localparam th_myfunc_a_1_3 = 3; localparam th_myfunc_a_1_4 = 4; localparam 
th_myfunc_a_1_5 = 5; localparam th_myfunc_a_1_6 = 6; localparam th_myfunc_a_1_7 = 7; always @(posedge CLK) begin if(RST) begin th_myfunc_a_1 <= th_myfunc_a_1_init; _th_myfunc_a_1_called <= 0; _th_myfunc_a_1_tid_7 <= 0; _th_myfunc_a_1_tid_8 <= 0; _th_myfunc_a_1_i_9 <= 0; _th_myfunc_a_1_tmp_10_11 <= 0; end else begin case(th_myfunc_a_1) th_myfunc_a_1_init: begin if(_th_myfunc_a_0_start[1] && (th_blink == 4)) begin _th_myfunc_a_1_called <= 1; end if(_th_myfunc_a_0_start[1] && (th_blink == 4)) begin _th_myfunc_a_1_tid_7 <= _th_blink_tid_1; end if((th_blink == 4) && _th_myfunc_a_0_start[1]) begin th_myfunc_a_1 <= th_myfunc_a_1_1; end end th_myfunc_a_1_1: begin _th_myfunc_a_1_tid_8 <= _th_myfunc_a_1_tid_7; th_myfunc_a_1 <= th_myfunc_a_1_2; end th_myfunc_a_1_2: begin $display("myfunc_a: tid = %d", _th_myfunc_a_1_tid_8); th_myfunc_a_1 <= th_myfunc_a_1_3; end th_myfunc_a_1_3: begin _th_myfunc_a_1_i_9 <= 0; th_myfunc_a_1 <= th_myfunc_a_1_4; end th_myfunc_a_1_4: begin if(_th_myfunc_a_1_i_9 < 30 - _th_myfunc_a_1_tid_8) begin th_myfunc_a_1 <= th_myfunc_a_1_5; end else begin th_myfunc_a_1 <= th_myfunc_a_1_6; end end th_myfunc_a_1_5: begin _th_myfunc_a_1_i_9 <= _th_myfunc_a_1_i_9 + 1; th_myfunc_a_1 <= th_myfunc_a_1_4; end th_myfunc_a_1_6: begin _th_myfunc_a_1_tmp_10_11 <= _th_myfunc_a_1_tid_8 + 100; th_myfunc_a_1 <= th_myfunc_a_1_7; end endcase end end localparam th_myfunc_a_2_1 = 1; localparam th_myfunc_a_2_2 = 2; localparam th_myfunc_a_2_3 = 3; localparam th_myfunc_a_2_4 = 4; localparam th_myfunc_a_2_5 = 5; localparam th_myfunc_a_2_6 = 6; localparam th_myfunc_a_2_7 = 7; always @(posedge CLK) begin if(RST) begin th_myfunc_a_2 <= th_myfunc_a_2_init; _th_myfunc_a_2_called <= 0; _th_myfunc_a_2_tid_12 <= 0; _th_myfunc_a_2_tid_13 <= 0; _th_myfunc_a_2_i_14 <= 0; _th_myfunc_a_2_tmp_15_16 <= 0; end else begin case(th_myfunc_a_2) th_myfunc_a_2_init: begin if(_th_myfunc_a_0_start[2] && (th_blink == 4)) begin _th_myfunc_a_2_called <= 1; end if(_th_myfunc_a_0_start[2] && (th_blink == 4)) begin _th_myfunc_a_2_tid_12 <= _th_blink_tid_1; end if((th_blink == 4) && _th_myfunc_a_0_start[2]) begin th_myfunc_a_2 <= th_myfunc_a_2_1; end end th_myfunc_a_2_1: begin _th_myfunc_a_2_tid_13 <= _th_myfunc_a_2_tid_12; th_myfunc_a_2 <= th_myfunc_a_2_2; end th_myfunc_a_2_2: begin $display("myfunc_a: tid = %d", _th_myfunc_a_2_tid_13); th_myfunc_a_2 <= th_myfunc_a_2_3; end th_myfunc_a_2_3: begin _th_myfunc_a_2_i_14 <= 0; th_myfunc_a_2 <= th_myfunc_a_2_4; end th_myfunc_a_2_4: begin if(_th_myfunc_a_2_i_14 < 30 - _th_myfunc_a_2_tid_13) begin th_myfunc_a_2 <= th_myfunc_a_2_5; end else begin th_myfunc_a_2 <= th_myfunc_a_2_6; end end th_myfunc_a_2_5: begin _th_myfunc_a_2_i_14 <= _th_myfunc_a_2_i_14 + 1; th_myfunc_a_2 <= th_myfunc_a_2_4; end th_myfunc_a_2_6: begin _th_myfunc_a_2_tmp_15_16 <= _th_myfunc_a_2_tid_13 + 100; th_myfunc_a_2 <= th_myfunc_a_2_7; end endcase end end localparam th_myfunc_a_3_1 = 1; localparam th_myfunc_a_3_2 = 2; localparam th_myfunc_a_3_3 = 3; localparam th_myfunc_a_3_4 = 4; localparam th_myfunc_a_3_5 = 5; localparam th_myfunc_a_3_6 = 6; localparam th_myfunc_a_3_7 = 7; always @(posedge CLK) begin if(RST) begin th_myfunc_a_3 <= th_myfunc_a_3_init; _th_myfunc_a_3_called <= 0; _th_myfunc_a_3_tid_17 <= 0; _th_myfunc_a_3_tid_18 <= 0; _th_myfunc_a_3_i_19 <= 0; _th_myfunc_a_3_tmp_20_21 <= 0; end else begin case(th_myfunc_a_3) th_myfunc_a_3_init: begin if(_th_myfunc_a_0_start[3] && (th_blink == 4)) begin _th_myfunc_a_3_called <= 1; end if(_th_myfunc_a_0_start[3] && (th_blink == 4)) begin _th_myfunc_a_3_tid_17 <= 
_th_blink_tid_1; end if((th_blink == 4) && _th_myfunc_a_0_start[3]) begin th_myfunc_a_3 <= th_myfunc_a_3_1; end end th_myfunc_a_3_1: begin _th_myfunc_a_3_tid_18 <= _th_myfunc_a_3_tid_17; th_myfunc_a_3 <= th_myfunc_a_3_2; end th_myfunc_a_3_2: begin $display("myfunc_a: tid = %d", _th_myfunc_a_3_tid_18); th_myfunc_a_3 <= th_myfunc_a_3_3; end th_myfunc_a_3_3: begin _th_myfunc_a_3_i_19 <= 0; th_myfunc_a_3 <= th_myfunc_a_3_4; end th_myfunc_a_3_4: begin if(_th_myfunc_a_3_i_19 < 30 - _th_myfunc_a_3_tid_18) begin th_myfunc_a_3 <= th_myfunc_a_3_5; end else begin th_myfunc_a_3 <= th_myfunc_a_3_6; end end th_myfunc_a_3_5: begin _th_myfunc_a_3_i_19 <= _th_myfunc_a_3_i_19 + 1; th_myfunc_a_3 <= th_myfunc_a_3_4; end th_myfunc_a_3_6: begin _th_myfunc_a_3_tmp_20_21 <= _th_myfunc_a_3_tid_18 + 100; th_myfunc_a_3 <= th_myfunc_a_3_7; end endcase end end localparam th_myfunc_b_0_1 = 1; localparam th_myfunc_b_0_2 = 2; localparam th_myfunc_b_0_3 = 3; localparam th_myfunc_b_0_4 = 4; localparam th_myfunc_b_0_5 = 5; localparam th_myfunc_b_0_6 = 6; localparam th_myfunc_b_0_7 = 7; always @(posedge CLK) begin if(RST) begin th_myfunc_b_0 <= th_myfunc_b_0_init; _th_myfunc_b_0_called <= 0; _th_myfunc_b_0_tid_22 <= 0; _th_myfunc_b_0_tid_23 <= 0; _th_myfunc_b_0_i_24 <= 0; _th_myfunc_b_0_tmp_25_26 <= 0; end else begin case(th_myfunc_b_0) th_myfunc_b_0_init: begin if(_th_myfunc_a_0_start[4] && (th_blink == 4)) begin _th_myfunc_b_0_called <= 1; end if(_th_myfunc_a_0_start[4] && (th_blink == 4)) begin _th_myfunc_b_0_tid_22 <= _th_blink_tid_1; end if((th_blink == 4) && _th_myfunc_a_0_start[4]) begin th_myfunc_b_0 <= th_myfunc_b_0_1; end end th_myfunc_b_0_1: begin _th_myfunc_b_0_tid_23 <= _th_myfunc_b_0_tid_22; th_myfunc_b_0 <= th_myfunc_b_0_2; end th_myfunc_b_0_2: begin $display("myfunc_b: tid = %d", _th_myfunc_b_0_tid_23); th_myfunc_b_0 <= th_myfunc_b_0_3; end th_myfunc_b_0_3: begin _th_myfunc_b_0_i_24 <= 0; th_myfunc_b_0 <= th_myfunc_b_0_4; end th_myfunc_b_0_4: begin if(_th_myfunc_b_0_i_24 < 30 - _th_myfunc_b_0_tid_23) begin th_myfunc_b_0 <= th_myfunc_b_0_5; end else begin th_myfunc_b_0 <= th_myfunc_b_0_6; end end th_myfunc_b_0_5: begin _th_myfunc_b_0_i_24 <= _th_myfunc_b_0_i_24 + 1; th_myfunc_b_0 <= th_myfunc_b_0_4; end th_myfunc_b_0_6: begin _th_myfunc_b_0_tmp_25_26 <= _th_myfunc_b_0_tid_23 + 200; th_myfunc_b_0 <= th_myfunc_b_0_7; end endcase end end localparam th_myfunc_b_1_1 = 1; localparam th_myfunc_b_1_2 = 2; localparam th_myfunc_b_1_3 = 3; localparam th_myfunc_b_1_4 = 4; localparam th_myfunc_b_1_5 = 5; localparam th_myfunc_b_1_6 = 6; localparam th_myfunc_b_1_7 = 7; always @(posedge CLK) begin if(RST) begin th_myfunc_b_1 <= th_myfunc_b_1_init; _th_myfunc_b_1_called <= 0; _th_myfunc_b_1_tid_27 <= 0; _th_myfunc_b_1_tid_28 <= 0; _th_myfunc_b_1_i_29 <= 0; _th_myfunc_b_1_tmp_30_31 <= 0; end else begin case(th_myfunc_b_1) th_myfunc_b_1_init: begin if(_th_myfunc_a_0_start[5] && (th_blink == 4)) begin _th_myfunc_b_1_called <= 1; end if(_th_myfunc_a_0_start[5] && (th_blink == 4)) begin _th_myfunc_b_1_tid_27 <= _th_blink_tid_1; end if((th_blink == 4) && _th_myfunc_a_0_start[5]) begin th_myfunc_b_1 <= th_myfunc_b_1_1; end end th_myfunc_b_1_1: begin _th_myfunc_b_1_tid_28 <= _th_myfunc_b_1_tid_27; th_myfunc_b_1 <= th_myfunc_b_1_2; end th_myfunc_b_1_2: begin $display("myfunc_b: tid = %d", _th_myfunc_b_1_tid_28); th_myfunc_b_1 <= th_myfunc_b_1_3; end th_myfunc_b_1_3: begin _th_myfunc_b_1_i_29 <= 0; th_myfunc_b_1 <= th_myfunc_b_1_4; end th_myfunc_b_1_4: begin if(_th_myfunc_b_1_i_29 < 30 - _th_myfunc_b_1_tid_28) begin th_myfunc_b_1 <= 
th_myfunc_b_1_5; end else begin th_myfunc_b_1 <= th_myfunc_b_1_6; end end th_myfunc_b_1_5: begin _th_myfunc_b_1_i_29 <= _th_myfunc_b_1_i_29 + 1; th_myfunc_b_1 <= th_myfunc_b_1_4; end th_myfunc_b_1_6: begin _th_myfunc_b_1_tmp_30_31 <= _th_myfunc_b_1_tid_28 + 200; th_myfunc_b_1 <= th_myfunc_b_1_7; end endcase end end localparam th_myfunc_b_2_1 = 1; localparam th_myfunc_b_2_2 = 2; localparam th_myfunc_b_2_3 = 3; localparam th_myfunc_b_2_4 = 4; localparam th_myfunc_b_2_5 = 5; localparam th_myfunc_b_2_6 = 6; localparam th_myfunc_b_2_7 = 7; always @(posedge CLK) begin if(RST) begin th_myfunc_b_2 <= th_myfunc_b_2_init; _th_myfunc_b_2_called <= 0; _th_myfunc_b_2_tid_32 <= 0; _th_myfunc_b_2_tid_33 <= 0; _th_myfunc_b_2_i_34 <= 0; _th_myfunc_b_2_tmp_35_36 <= 0; end else begin case(th_myfunc_b_2) th_myfunc_b_2_init: begin if(_th_myfunc_a_0_start[6] && (th_blink == 4)) begin _th_myfunc_b_2_called <= 1; end if(_th_myfunc_a_0_start[6] && (th_blink == 4)) begin _th_myfunc_b_2_tid_32 <= _th_blink_tid_1; end if((th_blink == 4) && _th_myfunc_a_0_start[6]) begin th_myfunc_b_2 <= th_myfunc_b_2_1; end end th_myfunc_b_2_1: begin _th_myfunc_b_2_tid_33 <= _th_myfunc_b_2_tid_32; th_myfunc_b_2 <= th_myfunc_b_2_2; end th_myfunc_b_2_2: begin $display("myfunc_b: tid = %d", _th_myfunc_b_2_tid_33); th_myfunc_b_2 <= th_myfunc_b_2_3; end th_myfunc_b_2_3: begin _th_myfunc_b_2_i_34 <= 0; th_myfunc_b_2 <= th_myfunc_b_2_4; end th_myfunc_b_2_4: begin if(_th_myfunc_b_2_i_34 < 30 - _th_myfunc_b_2_tid_33) begin th_myfunc_b_2 <= th_myfunc_b_2_5; end else begin th_myfunc_b_2 <= th_myfunc_b_2_6; end end th_myfunc_b_2_5: begin _th_myfunc_b_2_i_34 <= _th_myfunc_b_2_i_34 + 1; th_myfunc_b_2 <= th_myfunc_b_2_4; end th_myfunc_b_2_6: begin _th_myfunc_b_2_tmp_35_36 <= _th_myfunc_b_2_tid_33 + 200; th_myfunc_b_2 <= th_myfunc_b_2_7; end endcase end end localparam th_myfunc_b_3_1 = 1; localparam th_myfunc_b_3_2 = 2; localparam th_myfunc_b_3_3 = 3; localparam th_myfunc_b_3_4 = 4; localparam th_myfunc_b_3_5 = 5; localparam th_myfunc_b_3_6 = 6; localparam th_myfunc_b_3_7 = 7; always @(posedge CLK) begin if(RST) begin th_myfunc_b_3 <= th_myfunc_b_3_init; _th_myfunc_b_3_called <= 0; _th_myfunc_b_3_tid_37 <= 0; _th_myfunc_b_3_tid_38 <= 0; _th_myfunc_b_3_i_39 <= 0; _th_myfunc_b_3_tmp_40_41 <= 0; end else begin case(th_myfunc_b_3) th_myfunc_b_3_init: begin if(_th_myfunc_a_0_start[7] && (th_blink == 4)) begin _th_myfunc_b_3_called <= 1; end if(_th_myfunc_a_0_start[7] && (th_blink == 4)) begin _th_myfunc_b_3_tid_37 <= _th_blink_tid_1; end if((th_blink == 4) && _th_myfunc_a_0_start[7]) begin th_myfunc_b_3 <= th_myfunc_b_3_1; end end th_myfunc_b_3_1: begin _th_myfunc_b_3_tid_38 <= _th_myfunc_b_3_tid_37; th_myfunc_b_3 <= th_myfunc_b_3_2; end th_myfunc_b_3_2: begin $display("myfunc_b: tid = %d", _th_myfunc_b_3_tid_38); th_myfunc_b_3 <= th_myfunc_b_3_3; end th_myfunc_b_3_3: begin _th_myfunc_b_3_i_39 <= 0; th_myfunc_b_3 <= th_myfunc_b_3_4; end th_myfunc_b_3_4: begin if(_th_myfunc_b_3_i_39 < 30 - _th_myfunc_b_3_tid_38) begin th_myfunc_b_3 <= th_myfunc_b_3_5; end else begin th_myfunc_b_3 <= th_myfunc_b_3_6; end end th_myfunc_b_3_5: begin _th_myfunc_b_3_i_39 <= _th_myfunc_b_3_i_39 + 1; th_myfunc_b_3 <= th_myfunc_b_3_4; end th_myfunc_b_3_6: begin _th_myfunc_b_3_tmp_40_41 <= _th_myfunc_b_3_tid_38 + 200; th_myfunc_b_3 <= th_myfunc_b_3_7; end endcase end end endmodule """<def_stmt>test <block_start>veriloggen.reset()<line_sep>test_module=thread_to_thread_pool.mkTest()<line_sep>code=test_module.to_verilog()<import_from_stmt>pyverilog.vparser.parser 
VerilogParser<import_from_stmt>pyverilog.ast_code_generator.codegen ASTCodeGenerator<line_sep>parser=VerilogParser()<line_sep>expected_ast=parser.parse(expected_verilog)<line_sep>codegen=ASTCodeGenerator()<line_sep>expected_code=codegen.visit(expected_ast)<assert_stmt>(expected_code<eq>code)<block_end>
# -*- coding: utf-8 -*- # Copyright (c) 2016 Ericsson AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>calvin.actor.actor Actor manage condition calvinsys stateguard<import_from_stmt>calvin.utilities.calvinlogger get_logger<line_sep>_log=get_logger(__name__)<class_stmt>Pushbullet(Actor)<block_start>""" Post incoming tokens (text) to a runtime-specific Pushbullet channel with the given title Input: message : A message """<line_sep>@manage(["title"])<def_stmt>init self title<block_start>self.title=title<line_sep>self.setup()<block_end><def_stmt>did_migrate self<block_start>self.setup()<block_end><def_stmt>setup self<block_start>self._pb=calvinsys.open(self "web.pushbullet.channel.post")<block_end><def_stmt>teardown self<block_start>calvinsys.close(self._pb)<block_end><def_stmt>will_migrate self<block_start>self.teardown()<block_end><def_stmt>will_end self<block_start>self.teardown()<block_end>@stateguard(<lambda>self:self._pb<and>calvinsys.can_write(self._pb))@condition(action_input=['message'])<def_stmt>post_update self message<block_start>calvinsys.write(self._pb {"message":message "title":self.title})<block_end>action_priority=(post_update )<line_sep>requires=['web.pushbullet.channel.post']<line_sep>test_kwargs={'title':"Some Title"}<line_sep>test_calvinsys={'web.pushbullet.channel.post':{'write':[{'message':'A message' 'title':'Some Title'}]}}<line_sep>test_set=[{'inports':{'message':["A message"]}}]<block_end>
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making GameAISDK available. This source code file is licensed under the GNU General Public License Version 3. For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package. Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. """<import_stmt>os<import_stmt>sys<import_stmt>platform<line_sep>__is_windows_system=platform.platform().lower().startswith('window')<line_sep>__is_linux_system=platform.platform().lower().startswith('linux')<if_stmt>__is_windows_system<block_start>sys.path.append(os.path.dirname(__file__))<line_sep>sys.path.append(os.path.join(os.path.dirname(__file__) 'windows'))<import_from_stmt>windows.detect_util main<block_end><elif_stmt>__is_linux_system<block_start>sys.path.append(os.path.dirname(__file__))<line_sep>sys.path.append(os.path.join(os.path.dirname(__file__) 'ubuntu'))<import_from_stmt>ubuntu.detect_util main<block_end><else_stmt><block_start><raise>Exception('system is not support!')<block_end>""" parser = argparse.ArgumentParser(description='RefineDet Training') ## basic configurations parser.add_argument('-v', '--version', default='Refine_hc2net_version3', help='Refine_vgg, Refine_mobile, Refine_hcnet, Refine_hc2net, Refine_hc2net_version2, Refine_hc2net_version3, ' 'Refine_hc2net_version4, Refine_shufflenetv2, Refine_mobilenetv2, Refine_mobilenetv3, ' 'Refine_mobilenetv3_version2, Refine_mobilenetv3_version3, Refine_resnet101, Refine_resnet101_heavy') parser.add_argument('-s', '--size', default=320, type=int, help='320, 512 (512 support Refine_hc2net_version3, Refine_resnet101, Refine_resnet101_heavy)') parser.add_argument('-d', '--dataset', default='self_dataset', help='VOC, COCO, OpenImage500, Objects365 or self dataset') parser.add_argument('--num_classes', default=5, type=int, help='number of classes, including background') ## pretained model parser.add_argument('-m', '--trained_model', default='weights/Refine_hc2net_version3_320/model/Final_Refine_hc2net_version3_self_dataset.pth', type=str, help='Trained state_dict file path to open') parser.add_argument('--onnx_model', default='weights/Refine_hc2net_version3_320/model/Final_Refine_hc2net_version3_self_dataset.onnx', type=str, help='output onnx model') ## post processing parser.add_argument('-n', '--nms_type', default='soft', help='nms type: normal, soft') parser.add_argument('--obj_thresh', default=0.50, type=float, help='object threshold for testing') parser.add_argument('--nms_thresh', default=0.45, type=float, help='nms threshold for testing') ## src images parser.add_argument('-f', '--test_images', default='./test_images', help='test images can be folder, image or txt file') parser.add_argument('--image_nums', default=100, type=int, help='maximum number of test images, -1 means all images in test_images') parser.add_argument('--save_folder', default='eval/', type=str, help='Dir to save results') parser.add_argument('--label_list', default='./test_dataset.txt', type=str, help='test image label list') ## platform parser.add_argument('--cuda', default=False, type=str2bool, help='Use cuda to train model') parser.add_argument('--inference_platform', default='pytorch', type=str, help='inference platform: caffe2, pytorch') """<if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>os<import_stmt>sys<import_stmt>json<import_stmt>codecs<import_stmt>pickle<import_stmt>pathlib<import_stmt>itertools<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>collections defaultdict<import_from_stmt>functools partial<import_from_stmt>multiprocessing Pool<import_from_stmt>datetime datetime<import_stmt>matplotlib<line_sep>matplotlib.use("Agg")<import_from_stmt>plotnine ggplot aes theme geom_density geom_histogram geom_point scale_color_gradient labs <def_stmt>process_log_line x<block_start>"""Process a single line of the log"""<line_sep>obj=x["object"]<line_sep>date=datetime.strptime(x["date"][:-6] "%a %b %d %Y %H:%M:%S %Z%z")<line_sep>relative_position=obj["time_elapsed"]/obj["time_remaining"]<line_sep><return>([date obj["guess"] obj["qid"] obj["time_elapsed"] obj["time_remaining"] relative_position obj["ruling"] obj["user"]["id"] ] obj["qid"] obj["question_text"] )<block_end># remove duplicate records <def_stmt>remove_duplicate df_grouped uid<block_start>"""For each user, only take the first record for each question"""<line_sep>group=df_grouped.get_group(uid)<line_sep>user_questions=set()<line_sep>index=group.date.sort_values()<line_sep>rows=[]<for_stmt>_,row group.loc[index.index].iterrows()<block_start><if_stmt>row.qid<in>user_questions<block_start><continue><block_end>user_questions.add(row.qid)<line_sep>rows.append(row)<block_end><for_stmt>j,row enumerate(rows)<block_start>rows[j].user_n_records=len(rows)<block_end><return>rows<block_end><def_stmt>load_protobowl protobowl_dir="data/external/datasets/protobowl/protobowl-042818.log" min_user_questions=20 get_questions=<false> <block_start>"""Parse protobowl log, return buzz data and questions. Filter users that answered less than `min_user_questions` questions. Remove duplicates: for each user, only keep the first record for each question. 
Args protobowl_dir: json log min_user_questions: minimum number of questions answered Return df: dataframe of buzzing records questions: protobowl questions """<line_sep>df_dir=protobowl_dir+".h5"<line_sep>question_dir=protobowl_dir+".questions.pkl"<if_stmt>os.path.exists(df_dir)<and>os.path.exists(question_dir)<block_start><with_stmt>pd.HDFStore(df_dir)<as>store<block_start>df=store["data"]<block_end><with_stmt>open(question_dir "rb")<as>f<block_start>questions=pickle.load(f)<block_end><if_stmt>get_questions<block_start><return>df questions<block_end><else_stmt><block_start><return>df<block_end><block_end># parse protobowl json log data=[]<line_sep>count=0<line_sep>user_questions=defaultdict(set)<line_sep>questions=dict()<with_stmt>codecs.open(protobowl_dir "r" "utf-8")<as>f<block_start>line=f.readline()<while_stmt>line<is><not><none><block_start>line=line.strip()<if_stmt>len(line)<l>1<block_start><break><block_end><while_stmt><not>line.endswith("}}")<block_start>_line=f.readline()<if_stmt>_line<is><none><block_start><break><block_end>line<augadd>_line.strip()<block_end><try_stmt><block_start>line=json.loads(line)<block_end><except_stmt>ValueError<block_start>line=f.readline()<if_stmt>line<is><none><block_start><break><block_end><continue><block_end>count<augadd>1<if_stmt>count%10000<eq>0<block_start>sys.stderr.write("\rdone: {}/5130000".format(count))<block_end>x,qid,question_text=process_log_line(line)<if_stmt>qid<not><in>questions<block_start>questions[qid]=question_text<block_end>user_questions[x[-1]].add(qid)# x[-1] is uid data.append(x)<line_sep>line=f.readline()<block_end><block_end># filter users without enough questions filtered_data=[]<for_stmt>x data<block_start>uid=x[-1]<if_stmt>len(user_questions[uid])<ge>min_user_questions<block_start>x.append(len(user_questions[uid]))<line_sep>filtered_data.append(x)<block_end><block_end>df=pd.DataFrame(filtered_data columns=["date" "guess" "qid" "time_elapsed" "time_remaining" "relative_position" "result" "uid" "user_n_records" ] )<line_sep>df_grouped=df.groupby("uid")<line_sep>uids=list(df_grouped.groups.keys())<line_sep>pool=Pool(8)<line_sep>_remove_duplicate=partial(remove_duplicate df_grouped)<line_sep>user_rows=pool.map(_remove_duplicate uids)<line_sep>df=pd.DataFrame(list(itertools.chain(*user_rows)) columns=df.columns)<line_sep>df_grouped=df.groupby("uid")<line_sep>print("{} users".format(len(df_grouped)))<line_sep>print("{} records".format(len(df)))<line_sep>print("{} questions".format(len(set(df.qid))))<line_sep># save <with_stmt>pd.HDFStore(df_dir)<as>store<block_start>store["data"]=df<block_end><with_stmt>open(question_dir "wb")<as>f<block_start>pickle.dump(questions f)<block_end><if_stmt>get_questions<block_start><return>df questions<block_end><else_stmt><block_start><return>df<block_end><block_end><def_stmt>plot <block_start>outdir="output/protobowl/"<line_sep>pathlib.Path(outdir).mkdir(parents=<true> exist_ok=<true>)<line_sep>df=load_protobowl()<line_sep>df.result=df.result.apply(<lambda>x:x<is><true>)<line_sep>df["log_n_records"]=df.user_n_records.apply(np.log)<line_sep>df_user_grouped=df.groupby("uid")<line_sep>user_stat=df_user_grouped.agg(np.mean)<line_sep>print("{} users".format(len(user_stat)))<line_sep>print("{} records".format(len(df)))<line_sep>max_color=user_stat.log_n_records.max()<line_sep>user_stat["alpha"]=pd.Series(user_stat.log_n_records.apply(<lambda>x:x/max_color) index=user_stat.index)<line_sep># 2D user plot p0=(ggplot(user_stat)+geom_point(aes(x="relative_position" y="result" size="user_n_records"
color="log_n_records" alpha="alpha" ) show_legend={"color":<false> "alpha":<false> "size":<false>} )+scale_color_gradient(high="#e31a1c" low="#ffffcc")+labs(x="Average buzzing position" y="Accuracy")+theme(aspect_ratio=1))<line_sep>p0.save(os.path.join(outdir "protobowl_users.pdf"))<line_sep># p0.draw() print("p0 done")<line_sep># histogram of number of records p1=(ggplot(user_stat aes(x="log_n_records" y="..density.."))+geom_histogram(color="#e6550d" fill="#fee6ce")+geom_density()+labs(x="Log number of records" y="Density")+theme(aspect_ratio=0.3))<line_sep>p1.save(os.path.join(outdir "protobowl_hist.pdf"))<line_sep># p1.draw() print("p1 done")<line_sep># histogram of accuracy p2=(ggplot(user_stat aes(x="result" y="..density.."))+geom_histogram(color="#31a354" fill="#e5f5e0")+geom_density()+labs(x="Accuracy" y="Density")+theme(aspect_ratio=0.3))<line_sep>p2.save(os.path.join(outdir "protobowl_acc.pdf"))<line_sep># p2.draw() print("p2 done")<line_sep># histogram of buzzing position p3=(ggplot(user_stat aes(x="relative_position" y="..density.."))+geom_histogram(color="#3182bd" fill="#deebf7")+geom_density()+labs(x="Average buzzing position" y="Density")+theme(aspect_ratio=0.3))<line_sep>p3.save(os.path.join(outdir "protobowl_pos.pdf"))<line_sep># p3.draw() print("p3 done")<block_end><if_stmt>__name__<eq>"__main__"<block_start>plot()<block_end>
# -*- coding: utf-8 -*- """ @author: <NAME> @name: Bootstrap Utilities @summary: This module provides helpful functions for calculating the bootstrap confidence intervals. """<import_from_future_stmt> absolute_import<import_from_stmt>numbers Number<import_stmt>numpy<as>np<def_stmt>check_conf_percentage_validity conf_percentage<block_start>""" Ensures that `conf_percentage` is in (0, 100). Raises a helpful ValueError if otherwise. """<line_sep>msg="conf_percentage MUST be a number between 0.0 and 100."<line_sep>condition_1=isinstance(conf_percentage Number)<if_stmt><not>condition_1<block_start><raise>ValueError(msg)<block_end><else_stmt><block_start>condition_2=0<l>conf_percentage<l>100<if_stmt><not>condition_2<block_start><raise>ValueError(msg)<block_end><block_end><return><none><block_end><def_stmt>ensure_samples_is_ndim_ndarray samples name='bootstrap' ndim=2<block_start>""" Ensures that `samples` is an `ndim` numpy array. Raises a helpful ValueError if otherwise. """<assert_stmt>isinstance(ndim int)<assert_stmt>isinstance(name str)<if_stmt><not>isinstance(samples np.ndarray)<or><not>(samples.ndim<eq>ndim)<block_start>sample_name=name+"_samples"<line_sep>msg="`{}` MUST be a {}D ndarray.".format(sample_name ndim)<line_sep><raise>ValueError(msg)<block_end><return><none><block_end><def_stmt>get_alpha_from_conf_percentage conf_percentage<block_start>""" Calculates `100 - conf_percentage`, which is useful for calculating alpha levels. """<line_sep><return>100.0-conf_percentage<block_end><def_stmt>combine_conf_endpoints lower_array upper_array<block_start>""" Concatenates upper and lower endpoint arrays for a given confidence level. """<line_sep><return>np.concatenate([lower_array[<none> :] upper_array[<none> :]] axis=0)<block_end>
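# Hypothetical usage sketch (not part of the original module): one way the helpers above could combine into a simple percentile confidence interval. The function name and the 2D (num_samples, num_params) layout of bootstrap_samples are assumptions for illustration. <def_stmt>calc_percentile_interval bootstrap_samples conf_percentage<block_start># validate inputs with the helpers defined above check_conf_percentage_validity(conf_percentage)<line_sep>ensure_samples_is_ndim_ndarray(bootstrap_samples)<line_sep>alpha=get_alpha_from_conf_percentage(conf_percentage)<line_sep># column-wise endpoints of the bootstrap distribution lower=np.percentile(bootstrap_samples alpha/2.0 axis=0)<line_sep>upper=np.percentile(bootstrap_samples 100.0-alpha/2.0 axis=0)<line_sep><return>combine_conf_endpoints(lower upper)<block_end>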
<import_stmt>pytest<import_from_stmt>helpers.cluster ClickHouseCluster<line_sep>cluster=ClickHouseCluster(__file__)<line_sep>instance=cluster.add_instance('instance')<line_sep>@pytest.fixture(scope="module" autouse=<true>)<def_stmt>started_cluster <block_start><try_stmt><block_start>cluster.start()<line_sep><yield>cluster<block_end><finally_stmt><block_start>cluster.shutdown()<block_end><block_end><def_stmt>test_access_rights_for_function <block_start>create_function_query="CREATE FUNCTION MySum AS (a, b) -> a + b"<line_sep>instance.query("CREATE USER A")<line_sep>instance.query("CREATE USER B")<assert_stmt>"it's necessary to have grant CREATE FUNCTION ON *.*"<in>instance.query_and_get_error(create_function_query user='A')<line_sep>instance.query("GRANT CREATE FUNCTION on *.* TO A")<line_sep>instance.query(create_function_query user='A')<assert_stmt>instance.query("SELECT MySum(1, 2)")<eq>"3\n"<assert_stmt>"it's necessary to have grant DROP FUNCTION ON *.*"<in>instance.query_and_get_error("DROP FUNCTION MySum" user='B')<line_sep>instance.query("GRANT DROP FUNCTION ON *.* TO B")<line_sep>instance.query("DROP FUNCTION MySum" user='B')<assert_stmt>"Unknown function MySum"<in>instance.query_and_get_error("SELECT MySum(1, 2)")<line_sep>instance.query("REVOKE CREATE FUNCTION ON *.* FROM A")<assert_stmt>"it's necessary to have grant CREATE FUNCTION ON *.*"<in>instance.query_and_get_error(create_function_query user='A')<line_sep>instance.query("DROP USER IF EXISTS A")<line_sep>instance.query("DROP USER IF EXISTS B")<block_end>
""" This package contains a UVM like simulation agents to handle IO between circuit running in simulator and the code which drives the simulation. """<line_sep>
# Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_from_stmt>core perf_benchmark<import_from_stmt>contrib.cluster_telemetry ct_benchmarks_util<import_from_stmt>contrib.cluster_telemetry page_set<import_from_stmt>contrib.cluster_telemetry repaint_helpers<import_from_stmt>contrib.cluster_telemetry screenshot<class_stmt>ScreenshotCT(perf_benchmark.PerfBenchmark)<block_start>"""Captures PNG screenshots of web pages for Cluster Telemetry. Screenshots written to local file with path-safe urls of pages as filenames. Cluster Telemetry is then used for aggregation and analysis."""<line_sep>@classmethod<def_stmt>Name cls<block_start><return>'screenshot_ct'<block_end>@classmethod<def_stmt>AddBenchmarkCommandLineArgs cls parser<block_start>ct_benchmarks_util.AddBenchmarkCommandLineArgs(parser)<line_sep>parser.add_option('--png-outdir' type='string' default=<none> help='Output directory for the PNG files')<line_sep>parser.add_option('--wait-time' type='float' default=0 help='Wait time before the benchmark is started')<line_sep>parser.add_option('--dc-detect' action='store_true' dest='dc_detect' default=<false> help='Detects dynamic content by marking '<concat>'pixels that were not consistent across multiple '<concat>'screenshots with cyan')<line_sep>parser.add_option('--dc-wait-time' type='float' default=1 help='Wait time in between screenshots. Only applicable '<concat>'if dc_detect flag is true.')<line_sep>parser.add_option('--dc-extra-screenshots' type='int' default=1 help='Number of extra screenshots taken to detect '<concat>'dynamic content. Only applicable if dc_detect flag is '<concat>'true.')<line_sep>parser.add_option('--dc-threshold' type='float' default=0.5 help='Maximum tolerable percentage of dynamic content '<concat>'pixels. Raises an exception if percentage of dynamic '<concat>'content is beyond this threshold. Only applicable if '<concat>'dc_detect flag is true.')<block_end>@classmethod<def_stmt>ProcessCommandLineArgs cls parser args<block_start>ct_benchmarks_util.ValidateCommandLineArgs(parser args)<if_stmt><not>args.png_outdir<block_start>parser.error('Please specify --png-outdir')<block_end><block_end><def_stmt>CreatePageTest self options<block_start><return>screenshot.Screenshot(options.png_outdir options.wait_time options.dc_detect options.dc_wait_time options.dc_extra_screenshots options.dc_threshold)<block_end><def_stmt>CreateStorySet self options<block_start><return>page_set.CTPageSet(options.urls_list options.user_agent options.archive_data_file run_page_interaction_callback=repaint_helpers.WaitThenRepaint)<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>time<import_from_stmt>os environ<line_sep>environ["OMP_NUM_THREADS"]="4"<line_sep>environ["OPENBLAS_NUM_THREADS"]="4"<line_sep>environ["MKL_NUM_THREADS"]="4"<line_sep>environ["VECLIB_MAXIMUM_THREADS"]="4"<line_sep>environ["NUMEXPR_NUM_THREADS"]="4"<import_stmt>numpy<as>np<import_stmt>unittest<import_from_stmt>.base TestBase<import_from_stmt>buffalo.evaluate.base Evaluable<line_sep>scores=np.random.uniform(size=(100 100000)).astype(np.float32)<line_sep>topk=10<def_stmt>time_np_argsort <block_start>st=time.time()<line_sep>res=np.argsort(-scores)[: :topk]<line_sep>el=time.time()-st<line_sep><return>res el<block_end><def_stmt>time_np_argpartition <block_start>st=time.time()<line_sep>res=np.argpartition(-scores topk)[: :topk]<line_sep>res=np.array([sorted(row key=<lambda>x:-scores[i x])<for>i,row enumerate(res)])<line_sep>el=time.time()-st<line_sep><return>res el<block_end><def_stmt>time_quickselect <block_start>ev=Evaluable()<line_sep>st=time.time()<line_sep>res=ev.get_topk(scores k=topk num_threads=4)<line_sep>el=time.time()-st<line_sep><return>res el<block_end><class_stmt>TestQuickSelect(TestBase)<block_start><def_stmt>test_0_quickselect self<block_start>res_argsort,t_np_argsort=time_np_argsort()<line_sep>res_argpart,t_np_argparttion=time_np_argpartition()<line_sep>res_quickselect,t_quickselect=time_quickselect()<line_sep>self.assertGreaterEqual(t_np_argsort/t_quickselect 1)<line_sep>self.assertGreaterEqual(t_np_argparttion/t_quickselect 1)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>time<import_from_stmt>pydmd HODMD<def_stmt>myfunc x<block_start><return>np.cos(x)<times>np.sin(np.cos(x))+np.cos(x<times>.2)<block_end>x=np.linspace(0 10 64)<line_sep>y=myfunc(x)<line_sep>snapshots=y<line_sep>plt.plot(x snapshots '.')<line_sep>plt.show()<line_sep>hodmd=HODMD(svd_rank=0 exact=<true> opt=<true> d=30).fit(snapshots)<line_sep>hodmd.reconstructed_data.shape<line_sep>hodmd.plot_eigs()<line_sep>hodmd.original_time['dt']=hodmd.dmd_time['dt']=x[1]-x[0]<line_sep>hodmd.original_time['t0']=hodmd.dmd_time['t0']=x[0]<line_sep>hodmd.original_time['tend']=hodmd.dmd_time['tend']=x[-1]<line_sep>plt.plot(hodmd.original_timesteps snapshots '.' label='snapshots')<line_sep>plt.plot(hodmd.original_timesteps y '-' label='original function')<line_sep>plt.plot(hodmd.dmd_timesteps hodmd.reconstructed_data[0].real '--' label='DMD output')<line_sep>plt.legend()<line_sep>plt.show()<line_sep>hodmd.dmd_time['tend']=50<line_sep>fig=plt.figure(figsize=(15 5))<line_sep>plt.plot(hodmd.original_timesteps snapshots '.' label='snapshots')<line_sep>plt.plot(np.linspace(0 50 128) myfunc(np.linspace(0 50 128)) '-' label='original function')<line_sep>plt.plot(hodmd.dmd_timesteps hodmd.reconstructed_data[0].real '--' label='DMD output')<line_sep>plt.legend()<line_sep>plt.show()<line_sep>noise_range=[.01 .05 .1 .2]<line_sep>fig=plt.figure(figsize=(15 10))<line_sep>future=20<for_stmt>id_plot,i enumerate(noise_range start=1)<block_start>snapshots=y+np.random.uniform(-i i size=y.shape)<line_sep>hodmd=HODMD(svd_rank=0 exact=<true> opt=<true> d=30).fit(snapshots)<line_sep>hodmd.original_time['dt']=hodmd.dmd_time['dt']=x[1]-x[0]<line_sep>hodmd.original_time['t0']=hodmd.dmd_time['t0']=x[0]<line_sep>hodmd.original_time['tend']=hodmd.dmd_time['tend']=x[-1]<line_sep>hodmd.dmd_time['tend']=20<line_sep>plt.subplot(2 2 id_plot)<line_sep>plt.plot(hodmd.original_timesteps snapshots '.' label='snapshots')<line_sep>plt.plot(np.linspace(0 future 128) myfunc(np.linspace(0 future 128)) '-' label='original function')<line_sep>plt.plot(hodmd.dmd_timesteps hodmd.reconstructed_data[0].real '--' label='DMD output')<line_sep>plt.legend()<line_sep>plt.title('Noise [{} - {}]'.format(-i i))<block_end>plt.show()<line_sep>
# -*- coding: utf-8 """Like the eponymous built-in module but with additional back-ported functonality if any. """<line_sep>__all__=[]<import_from_stmt>collections.abc *<import_from_stmt>collections abc<as>_abc<line_sep>__all__<augadd>_abc.__all__<try_stmt><block_start><import_from_stmt>_collections_abc _check_methods<block_end><except_stmt>ImportError<block_start><def_stmt>_check_methods C *methods<block_start>mro=C.__mro__<for_stmt>method methods<block_start><for_stmt>B mro<block_start><if_stmt>method<in>B.__dict__<block_start><if_stmt>B.__dict__[method]<is><none><block_start><return>NotImplemented<block_end><break><block_end><block_end><else_stmt><block_start><return>NotImplemented<block_end><block_end><return><true><block_end><block_end><if_stmt>"Collection"<not><in>locals()<block_start>__all__.append("Collection")<class_stmt>Collection(Sized Iterable Container)<block_start>__slots__=()<line_sep>@classmethod<def_stmt>__subclasshook__ cls C<block_start><if_stmt>cls<is><not>Collection<block_start><return>NotImplemented<block_end><return>_check_methods(C "__len__" "__iter__" "__contains__")<block_end><block_end>Collection.register(Set)<line_sep>Collection.register(Sequence)<line_sep>Collection.register(Mapping)<block_end>
# Copyright 2014 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>attr<line_sep># how long a user can wait before validating a session after starting it THREEPID_SESSION_VALIDATION_TIMEOUT_MS=24<times>60<times>60<times>1000<line_sep># how long we keep sessions for after they've been validated THREEPID_SESSION_VALID_LIFETIME_MS=24<times>60<times>60<times>1000<line_sep>@attr.s(frozen=<true> slots=<true> auto_attribs=<true>)<class_stmt>ValidationSession<block_start>id:int<line_sep>medium:str<line_sep>address:str<line_sep>client_secret:str<line_sep>validated:bool<line_sep>mtime:int<block_end>@attr.s(frozen=<true> slots=<true> auto_attribs=<true>)<class_stmt>TokenInfo<block_start>token:str<line_sep>send_attempt_number:int<block_end><class_stmt>IncorrectClientSecretException(Exception)<block_start><pass><block_end><class_stmt>SessionExpiredException(Exception)<block_start><pass><block_end><class_stmt>InvalidSessionIdException(Exception)<block_start><pass><block_end><class_stmt>IncorrectSessionTokenException(Exception)<block_start><pass><block_end><class_stmt>SessionNotValidatedException(Exception)<block_start><pass><block_end><class_stmt>DestinationRejectedException(Exception)<block_start><pass><block_end>
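# Hypothetical helper (not part of the original module) sketching how the constants and exceptions above fit together: a session whose last-modified time (mtime) is older than THREEPID_SESSION_VALIDATION_TIMEOUT_MS is treated as expired. The helper name and this exact policy are assumptions. <def_stmt>check_session_not_expired session:ValidationSession now_ms:int<arrow><none><block_start><if_stmt>now_ms-session.mtime<g>THREEPID_SESSION_VALIDATION_TIMEOUT_MS<block_start><raise>SessionExpiredException()<block_end><block_end>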
"""JuiceShop Updates Revision ID: <KEY> Revises: 18d11f218dfe Create Date: 2019-11-14 08:52:52.530520 """<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<line_sep># revision identifiers, used by Alembic. revision="<KEY>"<line_sep>down_revision="18d11f218dfe"<line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade <block_start>op.alter_column("category" "_category" existing_type=sa.VARCHAR(length=24) type_=sa.VARCHAR(length=64) )<block_end><def_stmt>downgrade <block_start>op.alter_column("category" "_category" existing_type=sa.VARCHAR(length=64) type_=sa.VARCHAR(length=24) )<block_end>
<def_stmt>solve x:int<arrow>int<block_start>x=y# err z=x+1<line_sep><return>y<block_end># err
<import_stmt>unittest<import_stmt>topologylayer<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>topologylayer.util.process remove_zero_bars remove_infinite_bars<class_stmt>AlphaTest(unittest.TestCase)<block_start><def_stmt>test self<block_start><import_from_stmt>topologylayer.nn AlphaLayer<line_sep># superlevel set <for_stmt>alg ['hom' 'hom2' 'cohom']<block_start>layer=AlphaLayer(maxdim=1 alg=alg)<line_sep>x=torch.tensor([[1 1] [1 -1] [-1 -1] [-1 1]] dtype=torch.float).requires_grad_(<true>)<line_sep>dgms,issub=layer(x)<line_sep>self.assertEqual(issub <true> "Expected sublevel set layer. alg="+alg)<line_sep>self.assertEqual(torch.all(torch.eq(remove_infinite_bars(remove_zero_bars(dgms[0]) issub) torch.tensor([[0. 2.] [0. 2.] [0. 2.]]))) <true> "unexpected 0-dim barcode. alg="+alg)<line_sep>self.assertEqual(torch.all(torch.eq(remove_zero_bars(dgms[1]) torch.tensor([[2. 2.8284270763397217]]))) <true> "unexpected 1-dim barcode. alg="+alg)<line_sep>d0=remove_infinite_bars(remove_zero_bars(dgms[0]) issub)<line_sep>p=torch.sum(d0[: 1]-d0[: 0])<line_sep>p.backward()<line_sep>self.assertEqual(torch.all(torch.eq(x.grad torch.tensor([[1 1] [1 -1] [-1 0] [-1 0]] dtype=torch.float))) <true> "unexpected gradient. alg="+alg)<block_end><block_end><block_end>
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. <import_from_stmt>pathlib Path<import_from_stmt>tempfile TemporaryDirectory<import_from_stmt>typing Dict List NamedTuple Optional<import_from_stmt>urllib request<import_from_stmt>zipfile ZipFile<import_from_stmt>pandas.tseries.frequencies to_offset<import_from_stmt>gluonts json<import_from_stmt>gluonts.dataset jsonl<import_from_stmt>gluonts.dataset.field_names FieldName<import_from_stmt>gluonts.gluonts_tqdm tqdm<import_from_stmt>._tsf_reader TSFReader frequency_converter<import_from_stmt>._util metadata request_retrieve_hook to_dict<class_stmt>Dataset(NamedTuple)<block_start>file_name:str<line_sep>record:str<line_sep>ROOT:str="https://zenodo.org/record"<line_sep>@property<def_stmt>url self<block_start><return>f"{self.ROOT}/{self.record}/files/{self.file_name}"<block_end><def_stmt>download self path:Path<block_start>file_path=path/self.file_name<with_stmt>tqdm([] unit="B" unit_scale=<true> unit_divisor=1024 miniters=5 desc=f"Download {self.file_name}:" )<as>_tqdm<block_start>request.urlretrieve(self.url filename=file_path reporthook=request_retrieve_hook(_tqdm) )<block_end><return>file_path<block_end><block_end>datasets={"kaggle_web_traffic_with_missing":Dataset(file_name="kaggle_web_traffic_dataset_with_missing_values.zip" record="4656080" ) "kaggle_web_traffic_without_missing":Dataset(file_name="kaggle_web_traffic_dataset_without_missing_values.zip" record="4656075" ) "kaggle_web_traffic_weekly":Dataset(file_name="kaggle_web_traffic_weekly_dataset.zip" record="4656664" ) "m1_yearly":Dataset(file_name="m1_yearly_dataset.zip" record="4656193") "m1_quarterly":Dataset(file_name="m1_quarterly_dataset.zip" record="4656154") "m1_monthly":Dataset(file_name="m1_monthly_dataset.zip" record="4656159") "nn5_daily_with_missing":Dataset(file_name="nn5_daily_dataset_with_missing_values.zip" record="4656110" ) "nn5_daily_without_missing":Dataset(file_name="nn5_daily_dataset_without_missing_values.zip" record="4656117" ) "nn5_weekly":Dataset(file_name="nn5_weekly_dataset.zip" record="4656125") "tourism_monthly":Dataset(file_name="tourism_monthly_dataset.zip" record="4656096" ) "tourism_quarterly":Dataset(file_name="tourism_quarterly_dataset.zip" record="4656093" ) "tourism_yearly":Dataset(file_name="tourism_yearly_dataset.zip" record="4656103" ) "cif_2016":Dataset(file_name="cif_2016_dataset.zip" record="4656042" ) "london_smart_meters_without_missing":Dataset(file_name="london_smart_meters_dataset_without_missing_values.zip" record="4656091" ) "wind_farms_without_missing":Dataset(file_name="wind_farms_minutely_dataset_without_missing_values.zip" record="4654858" ) "car_parts_without_missing":Dataset(file_name="car_parts_dataset_without_missing_values.zip" record="4656021" ) "dominick":Dataset(file_name="dominick_dataset.zip" record="4654802" ) "fred_md":Dataset(file_name="fred_md_dataset.zip" record="4654833" ) "pedestrian_counts":Dataset(file_name="pedestrian_counts_dataset.zip" record="4656626" ) 
"hospital":Dataset(file_name="hospital_dataset.zip" record="4656014" ) "covid_deaths":Dataset(file_name="covid_deaths_dataset.zip" record="4656009" ) "kdd_cup_2018_without_missing":Dataset(file_name="kdd_cup_2018_dataset_without_missing_values.zip" record="4656756" ) "weather":Dataset(file_name="weather_dataset.zip" record="4654822" ) }<def_stmt>save_metadata dataset_path:Path cardinality:int freq:str prediction_length:int<block_start><with_stmt>open(dataset_path/"metadata.json" "w")<as>file<block_start>json.dump(metadata(cardinality=cardinality freq=freq prediction_length=prediction_length ) file )<block_end><block_end><def_stmt>save_datasets path:Path data:List[Dict] train_offset:int<block_start>train=path/"train"<line_sep>test=path/"test"<line_sep>train.mkdir(exist_ok=<true>)<line_sep>test.mkdir(exist_ok=<true>)<with_stmt>open(train/"data.json" "w")<as>train_fp open(test/"data.json" "w")<as>test_fp<block_start><for_stmt>data_entry tqdm(data total=len(data) desc="creating json files")<block_start>dic=to_dict(target_values=data_entry["target"] start=str(data_entry["start_timestamp"]) )<line_sep>jsonl.dump([dic] test_fp)<line_sep>dic["target"]=dic["target"][:-train_offset]<line_sep>jsonl.dump([dic] train_fp)<block_end><block_end><block_end><def_stmt>generate_forecasting_dataset dataset_path:Path dataset_name:str prediction_length:Optional[int]=<none> <block_start>dataset=datasets[dataset_name]<line_sep>dataset_path.mkdir(exist_ok=<true>)<with_stmt>TemporaryDirectory()<as>temp_dir<block_start>temp_path=Path(temp_dir)<with_stmt>ZipFile(dataset.download(temp_path))<as>archive<block_start>archive.extractall(path=temp_path)<block_end># only one file is exptected reader=TSFReader(temp_path/archive.namelist()[0])<line_sep>meta,data=reader.read()<block_end>freq=frequency_converter(meta.frequency)<if_stmt>prediction_length<is><none><block_start><if_stmt>hasattr(meta "forecast_horizon")<block_start>prediction_length=int(meta.forecast_horizon)<block_end><else_stmt><block_start>prediction_length=default_prediction_length_from_frequency(freq)<block_end><block_end>save_metadata(dataset_path len(data) freq prediction_length)<line_sep># Impute missing start dates with unix epoch and remove time series whose # length is less than or equal to the prediction length data=[{**d "start_timestamp":d.get("start_timestamp" "1970-01-01")}<for>d data<if>len(d[FieldName.TARGET])<g>prediction_length]<line_sep>save_datasets(dataset_path data prediction_length)<block_end><def_stmt>default_prediction_length_from_frequency freq:str<arrow>int<block_start>prediction_length_map={"T":60 "H":48 "D":30 "W":8 "M":12 "Y":4 }<try_stmt><block_start>freq=to_offset(freq).name<line_sep><return>prediction_length_map[freq]<block_end><except_stmt>KeyError<as>err<block_start><raise>ValueError(f"Cannot obtain default prediction length from frequency `{freq}`.")<from>err<block_end><block_end>
<import_stmt>json<import_stmt>traceback<import_from_stmt>typing Dict cast<import_stmt>ansible_runner<import_stmt>demistomock<as>demisto# noqa: F401 <import_stmt>ssh_agent_setup<import_from_stmt>CommonServerPython *# noqa: F401 # Dict to Markdown Converter adapted from https://github.com/PolBaladas/torsimany/ <def_stmt>dict2md json_block depth=0<block_start>markdown=""<if_stmt>isinstance(json_block dict)<block_start>markdown=parseDict(json_block depth)<block_end><if_stmt>isinstance(json_block list)<block_start>markdown=parseList(json_block depth)<block_end><return>markdown<block_end><def_stmt>parseDict d depth<block_start>markdown=""<for_stmt>k d<block_start><if_stmt>isinstance(d[k] (dict list))<block_start>markdown<augadd>addHeader(k depth)<line_sep>markdown<augadd>dict2md(d[k] depth+1)<block_end><else_stmt><block_start>markdown<augadd>buildValueChain(k d[k] depth)<block_end><block_end><return>markdown<block_end><def_stmt>parseList rawlist depth<block_start>markdown=""<for_stmt>value rawlist<block_start><if_stmt><not>isinstance(value (dict list))<block_start>index=rawlist.index(value)<line_sep>markdown<augadd>buildValueChain(index value depth)<block_end><else_stmt><block_start>markdown<augadd>parseDict(value depth)<block_end><block_end><return>markdown<block_end><def_stmt>buildHeaderChain depth<block_start>list_tag='* '<line_sep>htag='#'<line_sep>chain=list_tag<times>(bool(depth))+htag<times>(depth+1)+' value '+(htag<times>(depth+1)+'\n')<line_sep><return>chain<block_end><def_stmt>buildValueChain key value depth<block_start>tab=" "<line_sep>list_tag='* '<line_sep>chain=tab<times>(bool(depth-1))+list_tag+str(key)+": "+str(value)+"\n"<line_sep><return>chain<block_end><def_stmt>addHeader value depth<block_start>chain=buildHeaderChain(depth)<line_sep>chain=chain.replace('value' value.title())<line_sep><return>chain<block_end># Remove ansible branding from results <def_stmt>rec_ansible_key_strip obj<block_start><if_stmt>isinstance(obj dict)<block_start><return>{key.replace('ansible_' ''):rec_ansible_key_strip(val)<for>key,val obj.items()}<block_end><return>obj<block_end># COMMAND FUNCTIONS <def_stmt>generic_ansible integration_name command args:Dict[str Any]<arrow>CommandResults<block_start>readable_output=""<line_sep>sshkey=""<line_sep>fork_count=1# default to executing against 1 host at a time <if_stmt>args.get('concurrency')<block_start>fork_count=cast(int args.get('concurrency'))<block_end>inventory:Dict[str dict]={}<line_sep>inventory['all']={}<line_sep>inventory['all']['hosts']={}<line_sep>inventory['all']['hosts']['localhost']={}<line_sep>inventory['all']['hosts']['localhost']['ansible_connection']='local'<line_sep>module_args=""<line_sep># build module args list <for_stmt>arg_key,arg_value args.items()# skip the hardcoded host arg, as it doesn't relate to the module <block_start><if_stmt>arg_key<eq>'host'<block_start><continue><block_end>module_args<augadd>"%s=\"%s\" "%(arg_key arg_value)<block_end># If this isn't host based, then all the integration params will be used as command args <for_stmt>arg_key,arg_value demisto.params().items()<block_start>module_args<augadd>"%s=\"%s\" "%(arg_key arg_value)<block_end>r=ansible_runner.run(inventory=inventory host_pattern='all' module=command quiet=<true> omit_event_data=<true> ssh_key=sshkey module_args=module_args forks=fork_count)<line_sep>results=[]<for_stmt>each_host_event r.events# Troubleshooting # demisto.log("%s: %s\n" % (each_host_event['event'], each_host_event)) <block_start><if_stmt>each_host_event['event']<in>["runner_on_ok" 
"runner_on_unreachable" "runner_on_failed"]# parse results <block_start>result=json.loads('{'+each_host_event['stdout'].split('{' 1)[1])<line_sep>host=each_host_event['stdout'].split('|' 1)[0].strip()<line_sep>status=each_host_event['stdout'].replace('=>' '|').split('|' 3)[1]<line_sep># if successful build outputs <if_stmt>each_host_event['event']<eq>"runner_on_ok"<block_start><if_stmt>'fact'<in>command<block_start>result=result['ansible_facts']<block_end><else_stmt><block_start><if_stmt>result.get(command)<is><not><none><block_start>result=result[command]<block_end><else_stmt><block_start>result.pop("ansible_facts" <none>)<block_end><block_end>result=rec_ansible_key_strip(result)<if_stmt>host<ne>"localhost"<block_start>readable_output<augadd>"# %s - %s\n"%(host status)<block_end><else_stmt># This is integration is not host based <block_start>readable_output<augadd>"# %s\n"%status<block_end>readable_output<augadd>dict2md(result)<line_sep># add host and status to result result['host']=host<line_sep>result['status']=status<line_sep>results.append(result)<block_end><if_stmt>each_host_event['event']<eq>"runner_on_unreachable"<block_start>msg="Host %s unreachable\nError Details: %s"%(host result)<line_sep>return_error(msg)<block_end><if_stmt>each_host_event['event']<eq>"runner_on_failed"<block_start>msg="Host %s failed running command\nError Details: %s"%(host result)<line_sep>return_error(msg)<block_end><block_end><block_end># This is integration is not host based and always runs against localhost results=results[0]<line_sep><return>CommandResults(readable_output=readable_output outputs_prefix=integration_name+'.'+command outputs_key_field='' outputs=results)<block_end># MAIN FUNCTION <def_stmt>main <arrow><none><block_start>"""main function, parses params and runs command functions :return: :rtype: """<line_sep># SSH Key integration requires ssh_agent to be running in the background ssh_agent_setup.setup()<try_stmt><block_start><if_stmt>demisto.command()<eq>'test-module'# This is the call made when pressing the integration Test button. 
<block_start>return_results('ok')<block_end><elif_stmt>demisto.command()<eq>'vmware-about-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_about_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-category'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_category' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-category-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_category_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-cfg-backup'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_cfg_backup' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-cluster'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_cluster' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-cluster-drs'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_cluster_drs' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-cluster-ha'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_cluster_ha' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-cluster-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_cluster_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-cluster-vsan'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_cluster_vsan' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-content-deploy-template'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_content_deploy_template' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-content-library-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_content_library_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-content-library-manager'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_content_library_manager' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-datacenter'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_datacenter' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-datastore-cluster'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_datastore_cluster' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-datastore-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_datastore_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-datastore-maintenancemode'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_datastore_maintenancemode' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-dns-config'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_dns_config' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-drs-group'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_drs_group' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-drs-group-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_drs_group_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-drs-rule-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_drs_rule_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-dvs-host'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_dvs_host' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-dvs-portgroup'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_dvs_portgroup' 
demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-dvs-portgroup-find'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_dvs_portgroup_find' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-dvs-portgroup-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_dvs_portgroup_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-dvswitch'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_dvswitch' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-dvswitch-lacp'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_dvswitch_lacp' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-dvswitch-nioc'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_dvswitch_nioc' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-dvswitch-pvlans'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_dvswitch_pvlans' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-dvswitch-uplink-pg'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_dvswitch_uplink_pg' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-evc-mode'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_evc_mode' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-folder-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_folder_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-boot-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_boot_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-boot-manager'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_boot_manager' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-custom-attribute-defs'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_custom_attribute_defs' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-custom-attributes'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_custom_attributes' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-customization-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_customization_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-disk'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_disk' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-disk-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_disk_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-find'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_find' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-move'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_move' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-network'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_network' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-powerstate'<block_start>return_results(generic_ansible('vmwarev2' 
'vmware_guest_powerstate' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-screenshot'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_screenshot' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-sendkey'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_sendkey' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-snapshot'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_snapshot' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-snapshot-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_snapshot_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-tools-upgrade'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_tools_upgrade' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-tools-wait'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_tools_wait' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-video'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_video' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-guest-vnc'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_guest_vnc' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-acceptance'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_acceptance' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-active-directory'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_active_directory' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-capability-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_capability_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-config-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_config_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-config-manager'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_config_manager' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-datastore'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_datastore' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-dns-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_dns_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-facts'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_facts' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-feature-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_feature_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-firewall-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_firewall_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-firewall-manager'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_firewall_manager' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-hyperthreading'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_hyperthreading' 
demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-ipv6'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_ipv6' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-kernel-manager'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_kernel_manager' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-lockdown'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_lockdown' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-ntp'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_ntp' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-ntp-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_ntp_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-package-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_package_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-powermgmt-policy'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_powermgmt_policy' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-powerstate'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_powerstate' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-scanhba'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_scanhba' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-service-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_service_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-service-manager'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_service_manager' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-snmp'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_snmp' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-ssl-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_ssl_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-vmhba-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_vmhba_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-host-vmnic-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_host_vmnic_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-local-role-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_local_role_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-local-role-manager'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_local_role_manager' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-local-user-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_local_user_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-local-user-manager'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_local_user_manager' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-maintenancemode'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_maintenancemode' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-migrate-vmk'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_migrate_vmk' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-object-role-permission'<block_start>return_results(generic_ansible('vmwarev2' 
'vmware_object_role_permission' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-portgroup'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_portgroup' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-portgroup-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_portgroup_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-resource-pool'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_resource_pool' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-resource-pool-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_resource_pool_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-tag'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_tag' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-tag-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_tag_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-tag-manager'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_tag_manager' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-target-canonical-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_target_canonical_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vcenter-settings'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vcenter_settings' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vcenter-statistics'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vcenter_statistics' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vm-host-drs-rule'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vm_host_drs_rule' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vm-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vm_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vm-shell'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vm_shell' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vm-storage-policy-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vm_storage_policy_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vm-vm-drs-rule'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vm_vm_drs_rule' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vm-vss-dvs-migrate'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vm_vss_dvs_migrate' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vmkernel'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vmkernel' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vmkernel-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vmkernel_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vmkernel-ip-config'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vmkernel_ip_config' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vmotion'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vmotion' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vsan-cluster'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vsan_cluster' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vspan-session'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vspan_session' 
demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vswitch'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vswitch' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vswitch-info'<block_start>return_results(generic_ansible('vmwarev2' 'vmware_vswitch_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vsphere-file'<block_start>return_results(generic_ansible('vmwarev2' 'vsphere_file' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vcenter-extension'<block_start>return_results(generic_ansible('vmwarev2' 'vcenter_extension' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vcenter-extension-info'<block_start>return_results(generic_ansible('vmwarev2' 'vcenter_extension_info' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vcenter-folder'<block_start>return_results(generic_ansible('vmwarev2' 'vcenter_folder' demisto.args()))<block_end><elif_stmt>demisto.command()<eq>'vmware-vcenter-license'<block_start>return_results(generic_ansible('vmwarev2' 'vcenter_license' demisto.args()))<block_end><block_end># Log exceptions and return errors <except_stmt>Exception<as>e<block_start>demisto.error(traceback.format_exc())# print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')<block_end><block_end># ENTRY POINT <if_stmt>__name__<in>('__main__' '__builtin__' 'builtins')<block_start>main()<block_end>
<import_from_stmt>NetworKit *<import_stmt>urllib.parse<import_stmt>collections<def_stmt>analyzeWebCommunities graphPath urlsPath<block_start>print("reading input...")<line_sep>G=readGraph(graphPath)<line_sep>urls=LineFileReader().read(urlsPath)<line_sep>urlmap=retrieveAttributes(G.nodes() urls)<line_sep>print("community detection...")<line_sep>zeta=LabelPropagation().run(G)<line_sep>communitySizes=zeta.clusterSizes()<line_sep>communityIds=[i<for>i range(len(communitySizes))<if>communitySizes[i]<g>0]<line_sep>netlocMap={}# community -> size distribution of netlocs <for_stmt>cid communityIds<block_start><if_stmt>communitySizes[cid]<g>100# filter out small communities <block_start>community=zeta.getMembers(cid)<line_sep>locs=[urllib.parse.urlparse(urlmap[u]).netloc<for>u community<if>u<in>urlmap]<line_sep>netlocMap[cid]=collections.Counter(locs)<block_end><block_end><return>netlocMap<block_end><def_stmt>getNetloc2nodes G urls<block_start>netlocs=[urllib.parse.urlparse(url).netloc<for>url urls]<line_sep>netloc2nodes={}<for_stmt>u range(len(netlocs))<block_start>uloc=netlocs[u]<if_stmt>uloc<not><in>netloc2nodes<block_start>netloc2nodes[uloc]=[]<block_end>netloc2nodes[uloc].append(u)<block_end><return>netloc2nodes<block_end><def_stmt>toLocations urls<block_start>""" Turn the list of urls into a list of locations"""<line_sep>errors=0# count the parser errors locs=[]<for_stmt>url urls<block_start><try_stmt><block_start>parsed=urllib.parse.urlparse(url)<line_sep>locs.append(parsed.netloc)<block_end><except_stmt>Exception<block_start>print("URL parser error occurred")<line_sep>errors<augadd>1<line_sep>locs.append(<none>)<block_end><block_end><return>locs<block_end><def_stmt>getLocations nodes urls<block_start>""" Given a collection of nodes, return a set of net locations (domains) to which they belong"""<line_sep>theurls=dict((u urls[u])<for>u nodes)<line_sep>loclist=[urllib.parse.urlparse(url).netloc<for>url theurls.values()]<line_sep><return>set(loclist)<block_end><def_stmt>matchAndIndex substring ls exact=<false><block_start>matching={}<if_stmt>exact<block_start><for_stmt>i,s enumerate(ls)<block_start><if_stmt>substring<eq>s<block_start>matching[i]=s<block_end><block_end><block_end><else_stmt><block_start><for_stmt>i,s enumerate(ls)<block_start><if_stmt>substring<in>s<block_start>matching[i]=s<block_end><block_end><block_end><return>matching<block_end><def_stmt>writeSeeds match filename<block_start><with_stmt>open(filename "w")<as>file<block_start>file.write(",".join([str(s)<for>s match.keys()]))<block_end><block_end><def_stmt>matchLocs substring ls<block_start>match=matchAndIndex(substring ls)<line_sep>locs=set(match.values())<line_sep><return>locs<block_end>
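# A brief usage sketch for the matching helpers above; the URL list is illustrative. matchAndIndex returns {index: url} for every URL containing the substring, and matchLocs reduces the matches to their set of net locations. example_urls=['http://spiegel.de/a' 'http://cnn.com/b' 'http://spiegel.de/c']<line_sep>print(matchAndIndex('spiegel' example_urls))# {0: 'http://spiegel.de/a', 2: 'http://spiegel.de/c'} <line_sep>print(matchLocs('spiegel' example_urls))# {'spiegel.de'} 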
<import_stmt>time<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>a_nice_mc.utils.logger create_logger<line_sep>logger=create_logger(__name__)<def_stmt>kinetic_energy v<block_start><return>0.5<times>tf.reduce_sum(tf.multiply(v v) axis=1)<block_end><def_stmt>hamiltonian p v f<block_start>""" Return the value of the Hamiltonian :param p: position variable :param v: velocity variable :param f: energy function :return: hamiltonian """<line_sep><return>f(p)+kinetic_energy(v)<block_end><def_stmt>metropolis_hastings_accept energy_prev energy_next<block_start>""" Run Metropolis-Hastings algorithm for 1 step :param energy_prev: :param energy_next: :return: Tensor of boolean values, indicating accept or reject """<line_sep>energy_diff=energy_prev-energy_next<line_sep><return>(tf.exp(energy_diff)-tf.random_uniform(tf.shape(energy_prev)))<ge>0.0<block_end><def_stmt>simulate_dynamics initial_pos initial_vel stepsize n_steps energy_fn<block_start><def_stmt>leapfrog pos vel step i<block_start>de_dp_=tf.gradients(tf.reduce_sum(energy_fn(pos)) pos)[0]<line_sep>new_vel_=vel-step<times>de_dp_<line_sep>new_pos_=pos+step<times>new_vel_<line_sep><return>[new_pos_ new_vel_ step tf.add(i 1)]<block_end><def_stmt>condition pos vel step i<block_start><return>tf.less(i n_steps)<block_end>de_dp=tf.gradients(tf.reduce_sum(energy_fn(initial_pos)) initial_pos)[0]<line_sep>vel_half_step=initial_vel-0.5<times>stepsize<times>de_dp<line_sep>pos_full_step=initial_pos+stepsize<times>vel_half_step<line_sep>i=tf.constant(0)<line_sep>final_pos,new_vel,_,_=tf.while_loop(condition leapfrog [pos_full_step vel_half_step stepsize i])<line_sep>de_dp=tf.gradients(tf.reduce_sum(energy_fn(final_pos)) final_pos)[0]<line_sep>final_vel=new_vel-0.5<times>stepsize<times>de_dp<line_sep><return>final_pos final_vel<block_end><def_stmt>hmc_move initial_pos energy_fn stepsize n_steps<block_start>initial_vel=tf.random_normal(tf.shape(initial_pos))<line_sep>final_pos,final_vel=simulate_dynamics(initial_pos=initial_pos initial_vel=initial_vel stepsize=stepsize n_steps=n_steps energy_fn=energy_fn)<line_sep>accept=metropolis_hastings_accept(energy_prev=hamiltonian(initial_pos initial_vel energy_fn) energy_next=hamiltonian(final_pos final_vel energy_fn))<line_sep><return>accept final_pos final_vel<block_end><def_stmt>hmc_updates initial_pos stepsize avg_acceptance_rate final_pos accept target_acceptance_rate stepsize_inc stepsize_dec stepsize_min stepsize_max avg_acceptance_slowness<block_start>new_pos=tf.where(accept final_pos initial_pos)<line_sep>new_stepsize_=tf.multiply(stepsize tf.where(tf.greater(avg_acceptance_rate target_acceptance_rate) stepsize_inc stepsize_dec))<line_sep>new_stepsize=tf.maximum(tf.minimum(new_stepsize_ stepsize_max) stepsize_min)<line_sep>new_acceptance_rate=tf.add(avg_acceptance_slowness<times>avg_acceptance_rate (1.0-avg_acceptance_slowness)<times>tf.reduce_mean(tf.to_float(accept)))<line_sep><return>new_pos new_stepsize new_acceptance_rate<block_end><class_stmt>HamiltonianMonteCarloSampler(object)<block_start>""" TensorFlow implementation for Hamiltonian Monte Carlo """<def_stmt>__init__ self energy_fn prior stepsize=0.1 n_steps=10 target_acceptance_rate=0.65 avg_acceptance_slowness=0.9 stepsize_min=0.001 stepsize_max=1000.0 stepsize_dec=0.97 stepsize_inc=1.03 inter_op_parallelism_threads=1 intra_op_parallelism_threads=1 
sess=<false><block_start>self.energy_fn=energy_fn<line_sep>self.prior=prior<line_sep>self.z=self.energy_fn.z<line_sep>self.stepsize=tf.constant(stepsize)<line_sep>self.avg_acceptance_rate=tf.constant(target_acceptance_rate)<line_sep>self.sess=sess<def_stmt>fn zsa x<block_start>z,s,a=zsa<line_sep>accept,final_pos,final_vel=hmc_move(z energy_fn s n_steps)<line_sep>z_,s_,a_=hmc_updates(z s avg_acceptance_rate=a final_pos=final_pos accept=accept stepsize_min=stepsize_min stepsize_max=stepsize_max stepsize_dec=stepsize_dec stepsize_inc=stepsize_inc target_acceptance_rate=target_acceptance_rate avg_acceptance_slowness=avg_acceptance_slowness)<line_sep><return>z_ s_ a_<block_end>self.steps=tf.placeholder(tf.int32 [])<line_sep>elems=tf.zeros([self.steps])<line_sep>self.z_,self.stepsize_,self.avg_acceptance_rate_=tf.scan(fn elems (self.z self.stepsize self.avg_acceptance_rate) back_prop=<false>)<if_stmt><not>self.sess# only want to start a session if running this independently <block_start>self.sess=tf.Session(config=tf.ConfigProto(inter_op_parallelism_threads=inter_op_parallelism_threads intra_op_parallelism_threads=intra_op_parallelism_threads))<line_sep>self.sess.run(tf.global_variables_initializer())<block_end><block_end><def_stmt>sample self steps batch_size<block_start>start=time.time()<line_sep>z,stepsize,acceptance_rate=self.sess.run([self.z_ self.stepsize_ self.avg_acceptance_rate_] feed_dict={self.steps:steps self.z:self.prior(batch_size)})<line_sep>end=time.time()<line_sep>logger.info('batches [%d] steps [%d] time [%5.4f] steps/s [%5.4f]'%(batch_size steps end-start steps<times>batch_size/(end-start)))<line_sep>logger.info('average recent acceptance rate [%5.4f]'%np.mean(acceptance_rate[-int(steps<times>0.1):]))<line_sep>z=np.transpose(z [1 0 2])<line_sep><return>z<block_end><block_end>
"""Contains the backpropagation extension for grad_batch: BatchGrad. It defines the module extension for each module. """<import_from_stmt>typing List<import_from_stmt>torch.nn LSTM RNN BatchNorm1d BatchNorm2d BatchNorm3d Conv1d Conv2d Conv3d ConvTranspose1d ConvTranspose2d ConvTranspose3d Embedding Linear <import_from_stmt>backpack.extensions.firstorder.base FirstOrderBackpropExtension<import_from_stmt>. batchnorm_nd conv1d conv2d conv3d conv_transpose1d conv_transpose2d conv_transpose3d embedding linear rnn <class_stmt>BatchGrad(FirstOrderBackpropExtension)<block_start>"""Individual gradients for each sample in a minibatch. Stores the output in ``grad_batch`` as a ``[N x ...]`` tensor, where ``N`` batch size and ``...`` is the shape of the gradient. If ``subsampling`` is specified, ``N`` is replaced by the number of active samples. .. note:: Beware of scaling issue The `individual gradients` depend on the scaling of the overall function. Let ``fᵢ`` be the loss of the ``i`` th sample, with gradient ``gᵢ``. ``BatchGrad`` will return - ``[g₁, …, gₙ]`` if the loss is a sum, ``∑ᵢ₌₁ⁿ fᵢ``, - ``[¹/ₙ g₁, …, ¹/ₙ gₙ]`` if the loss is a mean, ``¹/ₙ ∑ᵢ₌₁ⁿ fᵢ``. The concept of individual gradients is only meaningful if the objective is a sum of independent functions (no batchnorm). """<def_stmt>__init__ self subsampling:List[int]=<none><block_start>"""Initialization. Defines extension for each module. Args: subsampling: Indices of samples in the mini-batch for which individual gradients will be computed. Defaults to ``None`` (use all samples). """<line_sep>super().__init__(savefield="grad_batch" module_exts={Linear:linear.BatchGradLinear() Conv1d:conv1d.BatchGradConv1d() Conv2d:conv2d.BatchGradConv2d() Conv3d:conv3d.BatchGradConv3d() ConvTranspose1d:conv_transpose1d.BatchGradConvTranspose1d() ConvTranspose2d:conv_transpose2d.BatchGradConvTranspose2d() ConvTranspose3d:conv_transpose3d.BatchGradConvTranspose3d() BatchNorm1d:batchnorm_nd.BatchGradBatchNormNd() BatchNorm2d:batchnorm_nd.BatchGradBatchNormNd() BatchNorm3d:batchnorm_nd.BatchGradBatchNormNd() RNN:rnn.BatchGradRNN() LSTM:rnn.BatchGradLSTM() Embedding:embedding.BatchGradEmbedding() } subsampling=subsampling )<block_end><block_end>
<import_stmt>os<import_stmt>os.path<as>osp<import_stmt>torch<import_from_stmt>torch nn optim<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>dataset.p2p_dataset P2PDataset<import_from_stmt>model.refinenet RefineNet<import_from_stmt>config cfg<line_sep>checkpoint_dir=cfg.CHECKPOINT_DIR<line_sep>os.makedirs(checkpoint_dir exist_ok=<true>)<def_stmt>main <block_start>train_dataset=P2PDataset(dataset_path=cfg.DATA_DIR root_idx=cfg.DATASET.ROOT_IDX)<line_sep>train_loader=DataLoader(train_dataset batch_size=cfg.SOLVER.BATCH_SIZE shuffle=<true>)<line_sep>model=RefineNet()<line_sep>device=torch.device(cfg.MODEL.DEVICE)<line_sep>model.to(device)<if_stmt>len(cfg.MODEL.GPU_IDS)<g>1<block_start>model=nn.parallel.DataParallel(model device_ids=cfg.MODEL.GPU_IDS)<block_end>optimizer=optim.Adam(model.parameters() lr=cfg.SOLVER.BASE_LR betas=(0.9 0.999))<line_sep>scheduler=optim.lr_scheduler.StepLR(optimizer step_size=cfg.SOLVER.LR_STEP_SIZE gamma=cfg.SOLVER.GAMMA last_epoch=-1)<line_sep>criterion=nn.MSELoss()<line_sep>model.train()<for_stmt>epoch range(1 cfg.SOLVER.NUM_EPOCHS+1)<block_start>total_loss=0<line_sep>count=0<for_stmt>i,(inp gt) enumerate(train_loader)<block_start>count<augadd>1<line_sep>inp=inp.to(device)<line_sep>gt=gt.to(device)<line_sep>preds=model(inp)<line_sep>loss=criterion(preds gt)<line_sep>total_loss<augadd>loss.data.item()<line_sep>optimizer.zero_grad()<line_sep>loss.backward()<line_sep>optimizer.step()<block_end>scheduler.step()<line_sep>avg_loss=total_loss/count<if_stmt>epoch%cfg.PRINT_FREQ<eq>0<block_start>print("epoch: {} | loss: {}.".format(epoch avg_loss))<block_end><if_stmt>epoch%cfg.SAVE_FREQ<eq>0<or>epoch<eq>cfg.SOLVER.NUM_EPOCHS<block_start>torch.save(model.state_dict() osp.join(checkpoint_dir "RefineNet_epoch_%03d.pth"%epoch))<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_future_stmt> division<import_stmt>re<import_stmt>socket<import_stmt>struct<import_stmt>time<import_from_stmt>kafka.tools log<import_from_stmt>kafka.tools.configuration ClientConfiguration<import_from_stmt>kafka.tools.models BaseModel<import_from_stmt>kafka.tools.exceptions ConfigurationException ConnectionError<import_from_stmt>kafka.tools.protocol.types.bytebuffer ByteBuffer<import_from_stmt>kafka.tools.utilities json_loads<class_stmt>Endpoint(BaseModel)<block_start>equality_attrs=['protocol' 'hostname' 'port']<def_stmt>__init__ self protocol hostname port<block_start>self.protocol=protocol<line_sep>self.hostname=hostname<line_sep>self.port=port<block_end><block_end><class_stmt>Broker(BaseModel)<block_start>equality_attrs=['hostname' 'id']<line_sep>@property<def_stmt>hostname self<block_start><return>self.endpoint.hostname<block_end>@hostname.setter<def_stmt>hostname self value<block_start>self.endpoint.hostname=value<block_end>@property<def_stmt>port self<block_start><return>self.endpoint.port<block_end>@port.setter<def_stmt>port self value<block_start>self.endpoint.port=value<block_end><def_stmt>__init__ self hostname id=0 port=9092 sock=<none> configuration=<none><block_start>self.id=id<line_sep>self.endpoint=Endpoint('' hostname port)<line_sep>self.jmx_port=-1<line_sep>self.rack=<none><line_sep>self.version=<none><line_sep>self.endpoints=<none><line_sep>self.timestamp=<none><line_sep>self.cluster=<none><line_sep>self.partitions={}<line_sep>self.endpoints=[]<line_sep>self._sock=sock<line_sep>self._correlation_id=1<line_sep>self._configuration=configuration<or>ClientConfiguration()<block_end>@classmethod<def_stmt>create_from_json cls broker_id jsondata<block_start>data=json_loads(jsondata)<line_sep># These things are required, and we can't proceed if they're not there <try_stmt><block_start>newbroker=cls(data['host'] id=broker_id port=data['port'])<block_end><except_stmt>KeyError<block_start><raise>ConfigurationException("Cannot parse broker data in zookeeper. 
This version of Kafka may not be supported.")<block_end># These things are optional, and are pulled in for convenience or extra features <for_stmt>attr ['jmx_port' 'rack' 'version' 'timestamp']<block_start><try_stmt><block_start>setattr(newbroker attr data[attr])<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end># if the broker defines multiple endpoints, newbroker._set_endpoints(data.get('endpoints' []))<line_sep><return>newbroker<block_end><def_stmt>_set_endpoints self endpoints<block_start>endpoint_re=re.compile("(.*)://(.*):([0-9]+)" re.I)<for_stmt>endpoint endpoints<block_start>m=endpoint_re.match(endpoint)<if_stmt>m<is><not><none><block_start>self.endpoints.append(Endpoint(m.group(1) m.group(2) int(m.group(3))))<block_end><block_end><block_end># Shallow copy - do not copy partitions map over <def_stmt>copy self<block_start>newbroker=Broker(self.hostname id=self.id port=self.port)<line_sep>newbroker.jmx_port=self.jmx_port<line_sep>newbroker.port=self.port<line_sep>newbroker.rack=self.rack<line_sep>newbroker.version=self.version<line_sep>newbroker.endpoints=self.endpoints<line_sep>newbroker.timestamp=self.timestamp<line_sep>newbroker.cluster=self.cluster<line_sep><return>newbroker<block_end><def_stmt>num_leaders self<block_start><return>self.num_partitions_at_position(0)<block_end><def_stmt>num_partitions_at_position self pos=0<block_start><if_stmt>pos<in>self.partitions<block_start><return>len(self.partitions[pos])<block_end><else_stmt><block_start><return>pos<block_end><block_end><def_stmt>percent_leaders self<block_start><if_stmt>self.num_partitions()<eq>0<block_start><return>0.0<block_end><return>(self.num_leaders()/self.num_partitions())<times>100<block_end><def_stmt>total_size self<block_start><return>sum([p.size<for>pos self.partitions<for>p self.partitions[pos]] 0)<block_end><def_stmt>num_partitions self<block_start><return>sum([len(self.partitions[pos])<for>pos self.partitions] 0)<block_end><def_stmt>get_endpoint self protocol<block_start><for_stmt>endpoint self.endpoints<block_start><if_stmt>endpoint.protocol<eq>protocol<block_start><return>endpoint<block_end><block_end><return>self.endpoint<block_end><def_stmt>_get_socket self sslcontext<block_start>sock=socket.socket(socket.AF_INET socket.SOCK_STREAM)<if_stmt>sslcontext<is><not><none><block_start>sock=sslcontext.wrap_socket(sock server_hostname=self.hostname)<block_end><return>sock<block_end><def_stmt>connect self<block_start>protocol='SSL'<if>self._configuration.ssl_context<is><not><none><else>'PLAINTEXT'<line_sep>endpoint=self.get_endpoint(protocol)<line_sep>log.info("Connecting to {0} on port {1} using {2}".format(self.hostname self.port protocol))<try_stmt><block_start>self._sock=self._sock<or>self._get_socket(self._configuration.ssl_context)<line_sep>self._sock.connect((endpoint.hostname endpoint.port))<block_end><except_stmt>socket.error<as>e<block_start>log.error("Cannot connect to broker {0}:{1}: {2}".format(endpoint.hostname endpoint.port e))<line_sep><raise>ConnectionError("Cannot connect to broker {0}:{1}: {2}".format(endpoint.hostname endpoint.port e))<block_end><block_end><def_stmt>close self<block_start>log.info("Disconnecting from {0}".format(self.hostname))<line_sep># Shutdown throws an error if the socket is not connected, but that's OK <try_stmt><block_start>self._sock.shutdown(socket.SHUT_RDWR)<block_end><except_stmt>OSError<block_start><pass><block_end>self._sock.close()<line_sep>self._sock=<none><block_end><def_stmt>send self 
request<block_start>attempts=0<while_stmt>attempts<l>self._configuration.num_retries<block_start>attempts<augadd>1<try_stmt># Connect to the broker if not currently connected <block_start><if_stmt>self._sock<is><none><block_start>self.connect()<block_end><return>self._single_send(request)<block_end><except_stmt>ConnectionError<as>e<block_start><if_stmt>attempts<ge>self._configuration.num_retries<block_start>log.error("Failed communicating with Kafka broker {0}. retries remaining = 0: {1}".format(self.id e))<line_sep><raise><block_end><else_stmt><block_start>log.warn("Failed communicating with Kafka broker {0}. retries remaining = {1}: {2}".format(self.id self._configuration.num_retries-attempts e))<block_end><block_end># Sleep for the backoff period before retrying the request, and force a reconnect self.close()<line_sep>time.sleep(self._configuration.retry_backoff)<block_end><block_end><def_stmt>_single_send self request# Build the payload based on the request passed in. We'll fill in the size at the end <block_start>buf=ByteBuffer(self._configuration.max_request_size)<line_sep>buf.putInt32(0)<line_sep>buf.putInt16(request.api_key)<line_sep>buf.putInt16(request.api_version)<line_sep>buf.putInt32(self._correlation_id)<line_sep>buf.putInt16(len(self._configuration.client_id))<line_sep>buf.put(struct.pack('{0}s'.format(len(self._configuration.client_id)) self._configuration.client_id.encode("utf-8")))<line_sep>request.encode(buf)<line_sep># Close the payload and write the size (payload size without the size field itself) buf.limit=buf.position-1<line_sep>payload_len=buf.capacity-4<line_sep>buf.rewind()<line_sep>buf.putInt32(payload_len)<line_sep>buf.rewind()<line_sep># Increment the correlation ID for the next request self._correlation_id<augadd>1<try_stmt># Send the payload bytes to the broker <block_start>self._sock.sendall(buf.get(buf.capacity))<line_sep># Read the first 4 bytes so we know the size size=ByteBuffer(self._sock.recv(4)).getInt32()<line_sep># Read the response that we're expecting response_data=self._read_bytes(size)<line_sep>response=ByteBuffer(response_data)<line_sep># Parse off the correlation ID for the response correlation_id=response.getInt32()<block_end><except_stmt>EOFError<block_start><raise>ConnectionError("Failed to read enough data from Kafka")<block_end><except_stmt>socket.error<as>e<block_start><raise>ConnectionError("Failed communicating with Kafka: {0}".format(e))<block_end># Get the proper response class and parse the response <return>correlation_id request.response.from_bytebuffer(correlation_id response.slice())<block_end><def_stmt>_read_bytes self size<block_start>bytes_left=size<line_sep>responses=[]<while_stmt>bytes_left<block_start><try_stmt><block_start>data=self._sock.recv(min(bytes_left 4096))<block_end><except_stmt>socket.error<block_start><raise>socket.error("Unable to receive data from Kafka")<block_end><if_stmt>data<eq>b''<block_start><raise>socket.error("Not enough data to read message -- did server kill socket?")<block_end>bytes_left<augsub>len(data)<line_sep>responses.append(data)<block_end><return>b''.join(responses)<block_end><def_stmt>to_dict self<block_start><return>{'id':self.id 'hostname':self.hostname 'jmx_port':self.jmx_port 'port':self.port 'rack':self.rack 'version':self.version}<block_end><block_end>
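# A small sketch of building a Broker from the zookeeper-style JSON that create_from_json above parses; host, port, and endpoint values are illustrative, and only 'host' and 'port' are required by the parser. <import_stmt>json<line_sep>payload=json.dumps({'host':'kafka-1.example.com' 'port':9092 'jmx_port':9999 'rack':'r1' 'endpoints':['PLAINTEXT://kafka-1.example.com:9092']})<line_sep>b=Broker.create_from_json(1 payload)<line_sep>print(b.hostname b.port b.rack)<line_sep>print(b.get_endpoint('PLAINTEXT').hostname)<line_sep>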
<import_stmt>torch<import_stmt>torch.nn<as>nn<line_sep># Based on # https://github.com/tensorflow/models/blob/master/research/struct2depth/model.py#L625-L641 <def_stmt>_gradient_x img:torch.Tensor<arrow>torch.Tensor<block_start><if_stmt>len(img.shape)<ne>4<block_start><raise>AssertionError(img.shape)<block_end><return>img[: : : :-1]-img[: : : 1:]<block_end><def_stmt>_gradient_y img:torch.Tensor<arrow>torch.Tensor<block_start><if_stmt>len(img.shape)<ne>4<block_start><raise>AssertionError(img.shape)<block_end><return>img[: : :-1 :]-img[: : 1: :]<block_end><def_stmt>inverse_depth_smoothness_loss idepth:torch.Tensor image:torch.Tensor<arrow>torch.Tensor<block_start>r"""Criterion that computes image-aware inverse depth smoothness loss. .. math:: \text{loss} = \left | \partial_x d_{ij} \right | e^{-\left \| \partial_x I_{ij} \right \|} + \left | \partial_y d_{ij} \right | e^{-\left \| \partial_y I_{ij} \right \|} Args: idepth: tensor with the inverse depth with shape :math:`(N, 1, H, W)`. image: tensor with the input image with shape :math:`(N, 3, H, W)`. Return: a scalar with the computed loss. Examples: >>> idepth = torch.rand(1, 1, 4, 5) >>> image = torch.rand(1, 3, 4, 5) >>> loss = inverse_depth_smoothness_loss(idepth, image) """<if_stmt><not>isinstance(idepth torch.Tensor)<block_start><raise>TypeError(f"Input idepth type is not a torch.Tensor. Got {type(idepth)}")<block_end><if_stmt><not>isinstance(image torch.Tensor)<block_start><raise>TypeError(f"Input image type is not a torch.Tensor. Got {type(image)}")<block_end><if_stmt><not>len(idepth.shape)<eq>4<block_start><raise>ValueError(f"Invalid idepth shape, we expect BxCxHxW. Got: {idepth.shape}")<block_end><if_stmt><not>len(image.shape)<eq>4<block_start><raise>ValueError(f"Invalid image shape, we expect BxCxHxW. Got: {image.shape}")<block_end><if_stmt><not>idepth.shape[-2:]<eq>image.shape[-2:]<block_start><raise>ValueError(f"idepth and image shapes must be the same. Got: {idepth.shape} and {image.shape}")<block_end><if_stmt><not>idepth.device<eq>image.device<block_start><raise>ValueError(f"idepth and image must be in the same device. Got: {idepth.device} and {image.device}")<block_end><if_stmt><not>idepth.dtype<eq>image.dtype<block_start><raise>ValueError(f"idepth and image must be in the same dtype. Got: {idepth.dtype} and {image.dtype}")<block_end># compute the gradients idepth_dx:torch.Tensor=_gradient_x(idepth)<line_sep>idepth_dy:torch.Tensor=_gradient_y(idepth)<line_sep>image_dx:torch.Tensor=_gradient_x(image)<line_sep>image_dy:torch.Tensor=_gradient_y(image)<line_sep># compute image weights weights_x:torch.Tensor=torch.exp(-torch.mean(torch.abs(image_dx) dim=1 keepdim=<true>))<line_sep>weights_y:torch.Tensor=torch.exp(-torch.mean(torch.abs(image_dy) dim=1 keepdim=<true>))<line_sep># apply image weights to depth smoothness_x:torch.Tensor=torch.abs(idepth_dx<times>weights_x)<line_sep>smoothness_y:torch.Tensor=torch.abs(idepth_dy<times>weights_y)<line_sep><return>torch.mean(smoothness_x)+torch.mean(smoothness_y)<block_end><class_stmt>InverseDepthSmoothnessLoss(nn.Module)<block_start>r"""Criterion that computes image-aware inverse depth smoothness loss. .. 
math:: \text{loss} = \left | \partial_x d_{ij} \right | e^{-\left \| \partial_x I_{ij} \right \|} + \left | \partial_y d_{ij} \right | e^{-\left \| \partial_y I_{ij} \right \|} Shape: - Inverse Depth: :math:`(N, 1, H, W)` - Image: :math:`(N, 3, H, W)` - Output: scalar Examples: >>> idepth = torch.rand(1, 1, 4, 5) >>> image = torch.rand(1, 3, 4, 5) >>> smooth = InverseDepthSmoothnessLoss() >>> loss = smooth(idepth, image) """<def_stmt>forward self idepth:torch.Tensor image:torch.Tensor<arrow>torch.Tensor<block_start><return>inverse_depth_smoothness_loss(idepth image)<block_end><block_end>
num=int(input("Input a number: "))<def_stmt>factorsOf num<block_start>"""Return the list of all positive factors of num."""<line_sep>factors=[]<for_stmt>i range(1 num+1)<block_start><if_stmt>num%i<eq>0<block_start>factors.append(i)<block_end><block_end><return>factors<block_end>print(factorsOf(num))<line_sep>
"""LiteJet constants."""<import_from_stmt>homeassistant.const Platform<line_sep>DOMAIN="litejet"<line_sep>CONF_EXCLUDE_NAMES="exclude_names"<line_sep>CONF_INCLUDE_SWITCHES="include_switches"<line_sep>PLATFORMS=[Platform.LIGHT Platform.SCENE Platform.SWITCH]<line_sep>CONF_DEFAULT_TRANSITION="default_transition"<line_sep>
# Union-Find (disjoint set) code template <class_stmt>UnionFind<block_start><def_stmt>__init__ self n:int<block_start>self.count=n# number of disjoint components <line_sep>self.parent=[i<for>i range(n)]<block_end><def_stmt>find self p:int<block_start>temp=p<while_stmt>p<ne>self.parent[p]<block_start>p=self.parent[p]<block_end># path compression: point every node on the walked path directly at the root <while_stmt>temp<ne>p<block_start>self.parent[temp],temp=p self.parent[temp]<block_end><return>p<block_end><def_stmt>union self p q<block_start>pSet,qSet=self.find(p) self.find(q)<if_stmt>pSet<ne>qSet<block_start>self.parent[pSet]=qSet<line_sep>self.count<augsub>1<block_end><block_end><block_end>
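# A small usage sketch for the Union-Find template above; connected is an illustrative helper, not part of the template. <def_stmt>connected uf:UnionFind p:int q:int<arrow>bool<block_start><return>uf.find(p)<eq>uf.find(q)<block_end>uf=UnionFind(5)<line_sep>uf.union(0 1)<line_sep>uf.union(1 2)<line_sep>print(connected(uf 0 2))# True <line_sep>print(connected(uf 0 4))# False <line_sep>print(uf.count)# 3 components remain: {0, 1, 2}, {3}, {4} 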
# coding: utf-8
import re
import os
import sys
import getpass
import urllib3
import requests
from mutagen.easyid3 import EasyID3
from mutagen.mp3 import MP3
from mutagen.id3 import ID3, APIC, TIT2, TPE1, TALB, USLT

# ID3 info:
tagMap = {'cover': APIC,
          'title': TIT2,
          'artist': TPE1,
          'album': TALB,
          'lyric': USLT}

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {'User-agent': 'Mozilla/5.0'}
MSCDIR = './mp3'

# print(repr(s)[1:-1])  # deal with invalid encoded filename


class netease_music:
    def __init__(self, path):
        '''path: directory that contains cache files'''
        self.path = os.path.abspath(path)
        self.id_name = {i[:i.find('-')]: i for i in os.listdir(path)
                        if i.endswith('.uc') or i.endswith('.uc!')}
        if self.id_name:
            if not os.path.exists(MSCDIR):
                os.mkdir(MSCDIR)
            print('Input :', path)
            print('Output:', MSCDIR)
        else:
            print('No cache file found in "{}"'.format(path))

    def getInfoFromWeb(self, musicId):
        # xpath for name and lrc:
        #   self.nameXpath = '//div[@class="tit"]/em[@class="f-ff2"]/text()'
        #   self.lrcSentencePt = re.compile(r'\[\d+:\d+\.\d+\](.*?)\\n')
        #   wrong: (r'\[\d+,\d+\](\(\d+,\d+\)(\w))+\n')
        # api:
        #   type=song, lyric, comments, detail, artist, album, search
        #   eg API = 'https://api.imjad.cn/cloudmusic/?type=song&id=1234132' (download music)
        dic = {}
        url = 'http://music.163.com/api/song/detail/?ids=[' + musicId + ']'
        res = requests.get(url, headers=headers).json()
        info = res['songs'][0]
        dic['artist'] = [info['artists'][0]['name']]
        dic['title'] = [info['name']]
        dic['cover'] = [info['album']['picUrl']]
        dic['album'] = [info['album']['name']]
        return dic

    def getPath(self, dic, musicId):
        '''get the name of music from info dict'''
        title = dic['title'][0]
        artist = dic['artist'][0]
        name = title + '(' + artist + ')'
        for i in '>?*/\\:"|<':
            name = name.replace(i, '-')  # convert to valid chars for file name
        name = re.sub(r'\s', '_', name)
        self.id_name[musicId] = name
        return os.path.join(MSCDIR, name + '.mp3')

    def decrypt(self, musicId, name):
        def _decrypt(cachePath):
            with open(cachePath, 'rb') as f:
                btay = bytearray(f.read())
            for i, j in enumerate(btay):
                btay[i] = j ^ 0xa3  # the cache is XOR-obfuscated with 0xa3
            return btay

        cachePath = os.path.join(self.path, name)
        idpath = os.path.join(MSCDIR, musicId + '.mp3')
        info = self.getInfoFromWeb(musicId)
        path = self.getPath(info, musicId)
        if not os.path.exists(path):
            with open(path, 'wb') as f:
                f.write(bytes(_decrypt(cachePath)))
        '''
        get info from index file
        if not os.path.exists(idpath):
            with open(idpath, 'wb') as f:
                f.write(bytes(_decrypt(cachePath)))
        try:
            info = dict(MP3(idpath, ID3=EasyID3))
        except:
            info = {}
        if info != {}:
            path = self.getPath(info, musicId)
            if os.path.exists(path):
                os.remove(idpath)
            else:
                os.rename(idpath, path)
        else:
            os.remove(idpath)
        '''
        return info, path

    def getLyric(self, musicId):
        url = 'http://music.163.com/api/song/lyric?id=' + musicId + '&lv=1&tv=-1'
        lrc = ''
        try:
            lyric = requests.get(url, headers=headers).json()
            lrc = lyric['lrc']['lyric']
            tlrc = lyric['tlyric']['lyric']
            # merge multi-lang lyrics
            dic = {}
            for i in lrc.splitlines():
                a = i.replace('[', ']').strip().split("]")
                dic[a[1].strip() + ' '] = a[-1].strip()
            tdic = {}
            for m in tlrc.splitlines():
                n = m.replace('[', ']').strip().split(']')
                tdic[n[1].strip()] = n[-1].strip()
            dicCopy = dic.copy()
            dicCopy.update(tdic)
            lines = []
            for k, v in sorted(dicCopy.items(), key=lambda item: item[0]):
                lines.append("[%s]%s" % (k.strip(), v))
            lrc = "\n".join(lines)
        except Exception:
            pass
        return lrc

    def setID3(self, lrc, info, path):
        tags = ID3(path)
        # remove old unsynchronized lyrics
        if tags.getall("USLT"):
            tags.delall("USLT")
        for t in ['album', 'title', 'artist']:
            if t in info:
                tags.add(tagMap[t](encoding=3, lang='', desc='', text=info[t][0]))
        if 'cover' in info:
            tags.add(APIC(encoding=3,
                          mime='image/png',
                          type=3,
                          desc='cover',
                          data=requests.get(info['cover'][0], stream=True,
                                            headers=headers).raw.read()))
        tags.add(USLT(encoding=3, lang='eng', desc='aaa', text=lrc))
        tags.save()

    def getMusic(self):
        ct = 0  # count converted files
        for musicId, name in self.id_name.items():
            try:
                info, path = self.decrypt(musicId, name)
                ct += 1
                print('[{}]'.format(ct).ljust(6) + self.id_name[musicId])
                self.setID3(self.getLyric(musicId), info, path)
            except Exception:
                pass


def main(path=''):
    if not path:
        pre = '/'.join(os.getcwd().split(os.sep)[:3])
        if sys.platform.lower().startswith('win'):  # windows
            path = pre + '/AppData/Local/Netease/CloudMusic/Cache/Cache'
        else:  # mac or linux
            path = pre + '/Library/Containers/com.netease.163music/Data/Caches/online_play_cache'
    if os.path.isdir(path):
        netease_music(path).getMusic()
    else:
        print('Directory "{}" does not exist, specify cache files directory instead'.format(path))


if __name__ == '__main__':
    main() if len(sys.argv) < 2 else main(sys.argv[1])
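# The cache format handled above is plain XOR obfuscation, so decryption is
# its own inverse. A minimal self-contained sketch of that property (the
# sample bytes are made up for illustration; real .uc/.uc! files hold the
# XOR-ed MP3 payload):
def xor_0xa3(data: bytes) -> bytes:
    return bytes(b ^ 0xa3 for b in data)

sample = b'ID3\x03'                     # a decrypted MP3 often starts with an ID3 header
obfuscated = xor_0xa3(sample)           # what would sit in the cache file
assert xor_0xa3(obfuscated) == sample   # applying XOR twice restores the bytes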
# vim: set ts=4 sw=4 expandtab:

from puremvc.patterns.command import SimpleCommand
from libtimesheet.model.TimeProxy import TimeProxy
from libtimesheet.view.DialogMediator import DialogMediator
from libtimesheet.view.MenuMediator import MenuMediator
from libtimesheet.view.DatePickerMediator import DatePickerMediator
from libtimesheet.view.TimeGridMediator import TimeGridMediator
from libtimesheet.view.SummaryMediator import SummaryMediator


class StartupCommand(SimpleCommand):
    def execute(self, note):
        self.facade.registerProxy(TimeProxy())
        mainPanel = note.getBody()
        self.facade.registerMediator(DialogMediator(mainPanel))
        self.facade.registerMediator(MenuMediator(mainPanel.menuBar))
        self.facade.registerMediator(TimeGridMediator(mainPanel.timeGrid))
        self.facade.registerMediator(SummaryMediator(mainPanel.summary))
        # This one must be registered last, or at least after TimeGridMediator.
        # Fires the DATE_SELECTED notification, which is used in TimeGridMediator.
        self.facade.registerMediator(DatePickerMediator(mainPanel.datePicker))
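# For context, a command like this is normally bound to a STARTUP notification
# on the application facade. A rough sketch of that wiring, assuming only the
# standard PureMVC Facade API (initializeController / registerCommand /
# sendNotification); the concrete AppFacade class, notification name, and
# startup() helper are hypothetical, not taken from libtimesheet:
from puremvc.patterns.facade import Facade

class AppFacade(Facade):
    STARTUP = 'startup'  # hypothetical notification name

    def initializeController(self):
        # register commands when the controller is initialized
        super(AppFacade, self).initializeController()
        self.registerCommand(AppFacade.STARTUP, StartupCommand)

    def startup(self, mainPanel):
        # mainPanel becomes note.getBody() inside StartupCommand.execute()
        self.sendNotification(AppFacade.STARTUP, mainPanel)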
from mine import *
from sys import argv
from random import randint

DIRS = ((1, 0), (0, 1), (-1, 0), (0, -1))


def generateMaze(xSize, ySize, start=(0, 0), dirs=DIRS, inside=None):
    if inside is None:
        inside = lambda xy: 0 <= xy[0] < xSize and 0 <= xy[1] < ySize

    def move(pos, dir):
        return (pos[0] + dirs[dir][0], pos[1] + dirs[dir][1])

    nDirs = len(dirs)

    def findDir(v):
        for i in range(nDirs):
            if dirs[i] == v:
                return i
        raise Exception("Mismatched direction")

    revDir = tuple(findDir((-dirs[i][0], -dirs[i][1])) for i in range(nDirs))
    visited = tuple([False for j in range(ySize)] for i in range(xSize))
    walls = tuple(tuple([True for j in range(nDirs)] for j in range(ySize)) for i in range(xSize))
    pos = start

    def getVisited(pos):
        return not inside(pos) or visited[pos[0]][pos[1]]

    # Depth-first search with backtracking: repeatedly knock down the wall
    # between the current cell and a randomly chosen unvisited neighbour.
    stack = []
    while True:
        visited[pos[0]][pos[1]] = True
        nUnvisited = 0
        for dir in range(nDirs):
            if not getVisited(move(pos, dir)):
                nUnvisited += 1
        if nUnvisited == 0:
            if stack:
                pos = stack.pop()
                continue
            else:
                break
        n = randint(0, nUnvisited - 1)
        dir = 0
        while True:
            if not getVisited(move(pos, dir)):
                if n == 0:
                    break
                n -= 1
            dir += 1
        walls[pos[0]][pos[1]][dir] = False
        pos = move(pos, dir)
        walls[pos[0]][pos[1]][revDir[dir]] = False
        stack.append(pos)

    return walls


xSize = 40
ySize = 40
b = block.STONE

if len(argv) > 1:
    xSize = int(argv[1])
    ySize = xSize
    if len(argv) > 2:
        b = Block.byName(argv[2])

mc = Minecraft()
walls = generateMaze(xSize, ySize)
pos = mc.player.getTilePos()
pos.x += 1
my = pos.y

for x in range(xSize):
    for y in range(ySize):
        mx = 2 * x + pos.x
        mz = 2 * y + pos.z

        def place(d1, d2):
            # build a two-block-high wall segment at the given offset
            mc.setBlock(mx + d1, my, mz + d2, b)
            mc.setBlock(mx + d1, my + 1, mz + d2, b)

        for dir in range(len(DIRS)):
            if walls[x][y][dir]:
                place(DIRS[dir][0], DIRS[dir][1])
        place(1, 1)
        place(1, -1)
        place(-1, 1)
        place(-1, -1)

# carve the entrance, and mark the exit with a gold block and a sign
mc.setBlock(pos.x - 1, pos.y, pos.z, block.AIR)
mc.setBlock(pos.x - 1, pos.y + 1, pos.z, block.AIR)
mc.setBlock(pos.x + 2 * (xSize - 1) + 1, pos.y - 1, pos.z + 2 * (ySize - 1), block.GOLD_BLOCK)
mc.setBlockWithNBT(pos.x + 2 * (xSize - 1) + 1, pos.y, pos.z + 2 * (ySize - 1),
                   block.SIGN('EXIT', headingAngle=270))
mc.setBlock(pos.x + 2 * (xSize - 1) + 1, pos.y + 1, pos.z + 2 * (ySize - 1), block.AIR)
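# generateMaze can be sanity-checked without a Minecraft connection by
# rendering the wall grid as ASCII (copy the function out on its own, since
# the script above connects to the server at import time). This harness is an
# illustration added here, not part of the original script; it relies on the
# DIRS ordering above (index 2 = -x, index 3 = -y):
def printMaze(walls, xSize, ySize):
    for y in range(ySize):
        # top edge of this row: the wall toward (0, -1)
        print(''.join('+--' if walls[x][y][3] else '+  ' for x in range(xSize)) + '+')
        # left edge of each cell: the wall toward (-1, 0)
        print(''.join(('|' if walls[x][y][2] else ' ') + '  ' for x in range(xSize)) + '|')
    print('+--' * xSize + '+')

# printMaze(generateMaze(8, 8), 8, 8)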
LICENSE = """
Copyright © 2021 Drillenissen#4268

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""

from colored import fg, attr
import requests
import time

r = fg(241)  # Setup color variables
r2 = fg(255)
b = fg(31)
w = fg(15)


def bumper():
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7',
        'Authorization': input(f"\n {r2}[{b}?{r2}] Token: ")
    }
    channel_id = input(f" {r2}[{b}?{r2}] Channel ID: ")
    print(f" {r2}[{b}!{r2}] Use ^C to exit")
    time.sleep(.3)
    print("")
    while True:
        requests.post(
            f"https://discord.com/api/channels/{channel_id}/messages",
            headers=headers,
            json={"content": "!d bump"}
        )
        print(f" {r2}[{b}+{r2}] Server Bumped")
        time.sleep(121 * 60)  # wait just over two hours between bumps
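# The post above ignores the HTTP response entirely. A defensive variant might
# check the status code and honor Discord's documented 429 rate-limit reply
# (which carries a retry_after value in seconds). The bump_once helper below
# is a hypothetical sketch added for illustration, not part of the original:
def bump_once(channel_id: str, headers: dict) -> bool:
    resp = requests.post(
        f"https://discord.com/api/channels/{channel_id}/messages",
        headers=headers,
        json={"content": "!d bump"},
    )
    if resp.status_code == 429:                    # rate limited
        wait = resp.json().get("retry_after", 5)
        time.sleep(float(wait))
        return False
    return resp.ok                                 # 2xx -> message sent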
from utils.utils_profiling import *  # load before other local modules

import argparse
import os
import sys
import warnings

warnings.simplefilter(action='ignore', category=FutureWarning)

import dgl
import numpy as np
import torch
import wandb

import time
import datetime

from torch import optim
import torch.nn as nn
from torch.utils.data import DataLoader

from experiments.nbody.nbody_dataloader import RIDataset
from utils import utils_logging
from experiments.nbody import nbody_models as models
from equivariant_attention.from_se3cnn.SO3 import rot
from experiments.nbody.nbody_flags import get_flags


def to_np(x):
    return x.cpu().detach().numpy()


def get_acc(pred, x_T, v_T, y=None, verbose=True):
    acc_dict = {}
    pred = to_np(pred)
    x_T = to_np(x_T)
    v_T = to_np(v_T)
    assert len(pred) == len(x_T)

    if verbose:
        y = np.asarray(y.cpu())
        _sq = (pred - y) ** 2
        acc_dict['mse'] = np.mean(_sq)

    _sq = (pred[:, 0, :] - x_T) ** 2
    acc_dict['pos_mse'] = np.mean(_sq)

    _sq = (pred[:, 1, :] - v_T) ** 2
    acc_dict['vel_mse'] = np.mean(_sq)

    return acc_dict


def train_epoch(epoch, model, loss_fnc, dataloader, optimizer, scheduler, FLAGS):
    model.train()

    loss_epoch = 0
    num_iters = len(dataloader)
    wandb.log({"lr": optimizer.param_groups[0]['lr']}, commit=False)
    for i, (g, y1, y2) in enumerate(dataloader):
        g = g.to(FLAGS.device)
        x_T = y1.to(FLAGS.device).view(-1, 3)
        v_T = y2.to(FLAGS.device).view(-1, 3)
        y = torch.stack([x_T, v_T], dim=1)

        optimizer.zero_grad()

        # run model forward and compute loss
        pred = model(g)
        loss = loss_fnc(pred, y)
        loss_epoch += to_np(loss)

        if torch.isnan(loss):
            import pdb
            pdb.set_trace()

        # backprop
        loss.backward()
        optimizer.step()

        # print to console
        if i % FLAGS.print_interval == 0:
            print(f"[{epoch}|{i}] loss: {loss:.5f}")

        # log to wandb
        if i % FLAGS.log_interval == 0:
            # 'commit' is only set to True here, meaning that this is where
            # wandb counts the steps
            wandb.log({"Train Batch Loss": to_np(loss)}, commit=True)

        # exit early if only doing profiling
        if FLAGS.profile and i == 10:
            sys.exit()

        scheduler.step(epoch + i / num_iters)

    # log train accuracy for entire epoch to wandb
    loss_epoch /= len(dataloader)
    wandb.log({"Train Epoch Loss": loss_epoch}, commit=False)


def test_epoch(epoch, model, loss_fnc, dataloader, FLAGS, dT):
    model.eval()

    keys = ['pos_mse', 'vel_mse']
    acc_epoch = {k: 0.0 for k in keys}
    acc_epoch_blc = {k: 0.0 for k in keys}  # for constant baseline
    acc_epoch_bll = {k: 0.0 for k in keys}  # for linear baseline
    loss_epoch = 0.0
    for i, (g, y1, y2) in enumerate(dataloader):
        g = g.to(FLAGS.device)
        x_T = y1.view(-1, 3)
        v_T = y2.view(-1, 3)
        y = torch.stack([x_T, v_T], dim=1).to(FLAGS.device)

        # run model forward and compute loss
        pred = model(g).detach()
        loss_epoch += to_np(loss_fnc(pred, y) / len(dataloader))
        acc = get_acc(pred, x_T, v_T, y=y)
        for k in keys:
            acc_epoch[k] += acc[k] / len(dataloader)

        # eval constant baseline
        bl_pred = torch.zeros_like(pred)
        acc = get_acc(bl_pred, x_T, v_T, verbose=False)
        for k in keys:
            acc_epoch_blc[k] += acc[k] / len(dataloader)

        # eval linear baseline: apply a linear update to the locations
        bl_pred[:, 0, :] = dT * g.ndata['v'][:, 0, :]
        acc = get_acc(bl_pred, x_T, v_T, verbose=False)
        for k in keys:
            acc_epoch_bll[k] += acc[k] / len(dataloader)

    print(f"...[{epoch}|test] loss: {loss_epoch:.5f}")
    wandb.log({"Test loss": loss_epoch}, commit=False)
    for k in keys:
        wandb.log({"Test " + k: acc_epoch[k]}, commit=False)
    wandb.log({'Const. BL pos_mse': acc_epoch_blc['pos_mse']}, commit=False)
    wandb.log({'Linear BL pos_mse': acc_epoch_bll['pos_mse']}, commit=False)
    wandb.log({'Linear BL vel_mse': acc_epoch_bll['vel_mse']}, commit=False)


class RandomRotation(object):
    def __init__(self):
        pass

    def __call__(self, x):
        M = np.random.randn(3, 3)
        Q, __ = np.linalg.qr(M)
        return x @ Q


def collate(samples):
    graphs, y1, y2 = map(list, zip(*samples))
    batched_graph = dgl.batch(graphs)
    return batched_graph, torch.stack(y1), torch.stack(y2)


def main(FLAGS, UNPARSED_ARGV):
    # Prepare data
    train_dataset = RIDataset(FLAGS, split='train')
    train_loader = DataLoader(train_dataset,
                              batch_size=FLAGS.batch_size,
                              shuffle=True,
                              collate_fn=collate,
                              num_workers=FLAGS.num_workers,
                              drop_last=True)

    test_dataset = RIDataset(FLAGS, split='test')
    # drop_last is only here so that we can count accuracy correctly.
    test_loader = DataLoader(test_dataset,
                             batch_size=FLAGS.batch_size,
                             shuffle=False,
                             collate_fn=collate,
                             num_workers=FLAGS.num_workers,
                             drop_last=True)

    # time steps
    assert train_dataset.data['delta_T'] == test_dataset.data['delta_T']
    assert train_dataset.data['sample_freq'] == test_dataset.data['sample_freq']
    print(f'deltaT: {train_dataset.data["delta_T"]}, '
          f'freq: {train_dataset.data["sample_freq"]}, '
          f'FLAGS.ri_delta_t: {FLAGS.ri_delta_t}')
    dT = train_dataset.data['delta_T'] * train_dataset.data['sample_freq'] * FLAGS.ri_delta_t

    FLAGS.train_size = len(train_dataset)
    FLAGS.test_size = len(test_dataset)
    assert len(test_dataset) < len(train_dataset)

    model = models.__dict__.get(FLAGS.model)(FLAGS.num_layers,
                                             FLAGS.num_channels,
                                             num_degrees=FLAGS.num_degrees,
                                             div=FLAGS.div,
                                             n_heads=FLAGS.head,
                                             si_m=FLAGS.simid,
                                             si_e=FLAGS.siend,
                                             x_ij=FLAGS.xij)

    utils_logging.write_info_file(model, FLAGS=FLAGS,
                                  UNPARSED_ARGV=UNPARSED_ARGV,
                                  wandb_log_dir=wandb.run.dir)

    if FLAGS.restore is not None:
        model.load_state_dict(torch.load(FLAGS.restore))
    model.to(FLAGS.device)

    # Optimizer settings
    optimizer = optim.Adam(model.parameters(), lr=FLAGS.lr)
    scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, FLAGS.num_epochs, eta_min=1e-4)
    criterion = nn.MSELoss()
    criterion = criterion.to(FLAGS.device)
    task_loss = criterion

    # Save path
    save_path = os.path.join(FLAGS.save_dir, FLAGS.name + '.pt')

    # Run training
    print('Begin training')
    for epoch in range(FLAGS.num_epochs):
        torch.save(model.state_dict(), save_path)
        print(f"Saved: {save_path}")

        train_epoch(epoch, model, task_loss, train_loader, optimizer, scheduler, FLAGS)
        test_epoch(epoch, model, task_loss, test_loader, FLAGS, dT)


if __name__ == '__main__':
    FLAGS, UNPARSED_ARGV = get_flags()
    os.makedirs(FLAGS.save_dir, exist_ok=True)

    # Log all args to wandb
    wandb.init(project='equivariant-attention', name=FLAGS.name, config=FLAGS)
    wandb.save('*.txt')

    # Where the magic is
    try:
        main(FLAGS, UNPARSED_ARGV)
    except Exception:
        import pdb
        import traceback
        traceback.print_exc()
        pdb.post_mortem()
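# Side note on the RandomRotation transform above: the Q factor of a QR
# decomposition of a Gaussian matrix is orthogonal (possibly a reflection,
# det = -1, rather than a proper rotation), so x @ Q preserves pairwise
# distances -- which is what makes it usable as a rotation-augmentation or
# equivariance probe. A minimal standalone check, added for illustration and
# not part of the training pipeline:
rr = RandomRotation()
x = np.random.randn(10, 3)          # ten random 3-D points
x_rot = rr(x)
# distances between points are preserved under the orthogonal map
d_before = np.linalg.norm(x[0] - x[1])
d_after = np.linalg.norm(x_rot[0] - x_rot[1])
assert np.allclose(d_before, d_after)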