# _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2020/6/9.
"""
from importlib import import_module

from flask import current_app, render_template, redirect

from app.core.error import APIException

__author__ = 'Allen7D'


def apply_default_view(app):
    '''
    :param app: Flask instance
    :return:
    '''
    app.config.from_object('app.extensions.default_view.config')

    @app.route('/')
    def index():
        '''Render the home page.'''
        url = {
            'github': current_app.config['GITHUB_URL'],
            'doc': current_app.config['DOC_URL'],
        }
        return render_template("index.html", url=url)

    @app.route('/doc')
    def doc():
        '''Redirect to the API documentation.'''
        return redirect('/apidocs/#/')

    apply_error_code_view(app)


def apply_error_code_view(app):
    def load_exception():
        module = import_module('app.libs.error_code')
        exception_list = []
        for elem_name in dir(module):
            elem = getattr(module, elem_name)
            if type(elem) == type and issubclass(elem, APIException):
                exception_list.append(elem())
        exception_list.sort(key=lambda x: x.error_code)
        return exception_list

    exception_list = load_exception()

    @app.route('/error_code')
    def error_code():
        return render_template('error_code.html', exception_list=exception_list)
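# A minimal usage sketch for the module above: wiring apply_default_view into a
# Flask app factory. The factory name, the import path and the GITHUB_URL/DOC_URL
# values are illustrative assumptions, not part of the original project.
from flask import Flask

from app.extensions.default_view import apply_default_view  # assumed import path


def create_app():
    app = Flask(__name__)
    # GITHUB_URL and DOC_URL are read by the index view registered below
    # (the extension's own config module may also provide them).
    app.config['GITHUB_URL'] = 'https://github.com/example/project'
    app.config['DOC_URL'] = 'https://example.com/doc'
    apply_default_view(app)  # registers '/', '/doc' and '/error_code'
    return app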
__all__=["predict" "predict_from_dl" "convert_raw_predictions" "end2end_detect"]<import_from_stmt>icevision.imports *<import_from_stmt>icevision.utils *<import_from_stmt>icevision.core *<import_from_stmt>icevision.data *<import_from_stmt>icevision.models.utils _predict_from_dl<import_from_stmt>icevision.models.ross.efficientdet.dataloaders *<import_from_stmt>effdet DetBenchTrain DetBenchPredict unwrap_bench<import_from_stmt>icevision.models.inference *<line_sep>@torch.no_grad()<def_stmt>_predict_batch model:Union[DetBenchTrain DetBenchPredict] batch:Sequence[torch.Tensor] records:Sequence[BaseRecord] detection_threshold:float=0.5 keep_images:bool=<false> device:Optional[torch.device]=<none> <arrow>List[Prediction]<block_start>device=device<or>model_device(model)<line_sep>imgs,img_info=batch<line_sep>imgs=imgs.to(device)<line_sep>img_info={k:v.to(device)<for>k,v img_info.items()}<line_sep>bench=DetBenchPredict(unwrap_bench(model))<line_sep>bench=bench.eval().to(device)<line_sep>raw_preds=bench(x=imgs img_info=img_info)<line_sep>preds=convert_raw_predictions(batch=batch raw_preds=raw_preds records=records detection_threshold=detection_threshold keep_images=keep_images )<line_sep><return>preds<block_end><def_stmt>predict model:Union[DetBenchTrain DetBenchPredict] dataset:Dataset detection_threshold:float=0.5 keep_images:bool=<false> device:Optional[torch.device]=<none> <arrow>List[Prediction]<block_start>batch,records=build_infer_batch(dataset)<line_sep><return>_predict_batch(model=model batch=batch records=records detection_threshold=detection_threshold keep_images=keep_images device=device )<block_end><def_stmt>predict_from_dl model:nn.Module infer_dl:DataLoader show_pbar:bool=<true> keep_images:bool=<false> **predict_kwargs <block_start><return>_predict_from_dl(predict_fn=_predict_batch model=model infer_dl=infer_dl show_pbar=show_pbar keep_images=keep_images **predict_kwargs )<block_end><def_stmt>convert_raw_predictions batch raw_preds:torch.Tensor records:Sequence[BaseRecord] detection_threshold:float keep_images:bool=<false> <arrow>List[Prediction]<block_start>tensor_images,*_=batch<line_sep>dets=raw_preds.detach().cpu().numpy()<line_sep>preds=[]<for_stmt>det,record,tensor_image zip(dets records tensor_images)<block_start><if_stmt>detection_threshold<g>0<block_start>scores=det[: 4]<line_sep>keep=scores<g>detection_threshold<line_sep>det=det[keep]<block_end>pred=BaseRecord((ScoresRecordComponent() ImageRecordComponent() InstancesLabelsRecordComponent() BBoxesRecordComponent() ))<line_sep>pred.detection.set_class_map(record.detection.class_map)<line_sep>pred.detection.set_labels_by_id(det[: 5].astype(int))<line_sep>pred.detection.set_bboxes([BBox.from_xyxy(*xyxy)<for>xyxy det[: :4]])<line_sep>pred.detection.set_scores(det[: 4])<if_stmt>keep_images<block_start>record.set_img(tensor_to_image(tensor_image))<block_end>preds.append(Prediction(pred=pred ground_truth=record))<block_end><return>preds<block_end>end2end_detect=partial(_end2end_detect predict_fn=predict)<line_sep>
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import pytest

from datadog_checks.gunicorn import GUnicornCheck

from .common import CHECK_NAME, CONTAINER_NAME, GUNICORN_VERSION, INSTANCE

# TODO: Test metadata in e2e when we can collect metadata from the agent

CHECK_ID = 'test:123'


def _assert_metadata(datadog_agent):
    major, minor, patch = GUNICORN_VERSION.split('.')
    version_metadata = {
        'version.scheme': 'semver',
        'version.major': major,
        'version.minor': minor,
        'version.patch': patch,
        'version.raw': GUNICORN_VERSION,
    }
    datadog_agent.assert_metadata(CHECK_ID, version_metadata)
    datadog_agent.assert_metadata_count(5)


@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
def test_collect_metadata_instance(aggregator, datadog_agent, setup_gunicorn):
    instance = INSTANCE.copy()
    instance['gunicorn'] = setup_gunicorn['gunicorn_bin_path']

    check = GUnicornCheck(CHECK_NAME, {}, [instance])
    check.check_id = CHECK_ID
    check.check(instance)

    _assert_metadata(datadog_agent)


@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
def test_collect_metadata_init_config(aggregator, datadog_agent, setup_gunicorn):
    init_config = {'gunicorn': setup_gunicorn['gunicorn_bin_path']}

    check = GUnicornCheck(CHECK_NAME, init_config, [INSTANCE])
    check.check_id = CHECK_ID
    check.check(INSTANCE)

    _assert_metadata(datadog_agent)


@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
@pytest.mark.usefixtures('dd_environment')
def test_collect_metadata_docker(aggregator, datadog_agent, setup_gunicorn):
    instance = INSTANCE.copy()
    instance['gunicorn'] = 'docker exec {} gunicorn'.format(CONTAINER_NAME)

    check = GUnicornCheck(CHECK_NAME, {}, [instance])
    check.check_id = CHECK_ID
    check.check(instance)

    _assert_metadata(datadog_agent)


def test_collect_metadata_count(aggregator, datadog_agent, setup_gunicorn):
    instance = INSTANCE.copy()
    instance['gunicorn'] = setup_gunicorn['gunicorn_bin_path']

    check = GUnicornCheck(CHECK_NAME, {}, [instance])
    check.check_id = 'test:123'
    check.check(instance)

    datadog_agent.assert_metadata_count(5)


def test_collect_metadata_invalid_binary(aggregator, datadog_agent, setup_gunicorn):
    instance = INSTANCE.copy()
    instance['gunicorn'] = '/bin/not_exist'

    check = GUnicornCheck(CHECK_NAME, {}, [instance])
    check.check_id = CHECK_ID
    check.check(instance)

    datadog_agent.assert_metadata_count(0)
# -*- coding: utf-8 -*- """Streaming price data. demonstrate the PricingStream request and convenient handling of data using Pydantic. Usage: streaming_prices.py --instrument <instrument> [--instrument <instrument>] [--nice] [--timeout <timeout>] [--count <count>] Options: --nice json indented formatting --timeout=<timeout> timeout in seconds --count=<count> # of records to receive [default: 0] unlimited """<import_stmt>json<import_from_stmt>oandapyV20 API<import_from_stmt>oandapyV20.exceptions V20Error StreamTerminated<import_from_stmt>oandapyV20.endpoints.pricing PricingStream<import_from_stmt>exampleauth exampleAuth<import_from_stmt>requests.exceptions ConnectionError<import_stmt>logging<import_from_stmt>typing List<import_from_stmt>pydantic BaseModel<import_from_stmt>datetime datetime<line_sep>logging.basicConfig(filename="pricingstream.log" level=logging.INFO format='%(asctime)s [%(levelname)s] %(name)s : %(message)s' )<line_sep>logger=logging.getLogger(__name__)<class_stmt>HeartBeat(BaseModel)<block_start>type:str<line_sep>time:datetime<block_end><class_stmt>Price(BaseModel)<block_start>price:float<line_sep>liquidity:int<block_end><class_stmt>PriceRecord(BaseModel)<block_start>instrument:str<line_sep>type:str<line_sep>time:datetime<line_sep>closeoutBid:float<line_sep>closeoutAsk:float<line_sep>status:str<line_sep>tradeable:bool<line_sep>bids:List[Price]<line_sep>asks:List[Price]<block_end><def_stmt>main clargs<block_start>accountID,access_token=exampleAuth()<line_sep>request_params={}<if_stmt>clargs['--timeout']<block_start>request_params={"timeout":clargs['--timeout']}<block_end># fetch MAXREC stream records MAXREC=int(clargs['--count'])<line_sep>api=API(access_token=access_token environment="practice" request_params=request_params)<line_sep># setup the stream request r=PricingStream(accountID=accountID params={"instruments":",".join(clargs['<instrument>'])})<line_sep>n=0<line_sep>_m={"PRICE":PriceRecord "HEARTBEAT":HeartBeat}<while_stmt><true><block_start><try_stmt><block_start><for_stmt>rv api.request(r)# create a Pydantic record based on the type <block_start>rec=_m[rv['type']](**rv)<line_sep>n<augadd>1<if_stmt>MAXREC<and>n<ge>MAXREC<block_start>r.terminate("maxrecs received: {}".format(MAXREC))<block_end>print(rec.json()<if>clargs['--nice']<else>rec)<block_end><block_end><except_stmt>V20Error<as>e# catch API related errors that may occur <block_start>logger.error("V20Error: %s" e)<line_sep><break><block_end><except_stmt>ConnectionError<as>e<block_start>logger.error("%s" e)<block_end><except_stmt>StreamTerminated<as>e<block_start>logger.error("Stopping: %s" e)<line_sep><break><block_end><except_stmt>Exception<as>e<block_start>logger.error("%s" e)<line_sep><break><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>docopt docopt<line_sep># commandline args ... clargs=docopt(__doc__)<line_sep>main(clargs)<block_end>
from office365.runtime.client_value import ClientValue


class FollowedItem(ClientValue):
    pass
from .dns_record_base_exception import DnsRecordBaseException


class AddError(DnsRecordBaseException):
    pass
"""Abstraction for authentication based on HAP/SRP."""<import_stmt>binascii<import_from_stmt>enum Enum auto<import_from_stmt>typing Optional Tuple<import_from_stmt>pyatv exceptions<line_sep># pylint: disable=invalid-name <class_stmt>AuthenticationType(Enum)<block_start>"""Supported authentication type."""<line_sep>Null=auto()<line_sep>"""No authentication (just pass through)."""<line_sep>Legacy=auto()<line_sep>"""Legacy SRP based authentication."""<line_sep>HAP=auto()<line_sep>"""Authentication based on HAP (Home-Kit)."""<line_sep>Transient=auto()<line_sep>"""Authentication based on transient HAP (Home-Kit)."""<block_end># pylint: enable=invalid-name <class_stmt>HapCredentials<block_start>"""Identifiers and encryption keys used by HAP."""<def_stmt>__init__ self ltpk:bytes=b"" ltsk:bytes=b"" atv_id:bytes=b"" client_id:bytes=b"" <arrow><none><block_start>"""Initialize a new Credentials."""<line_sep>self.ltpk:bytes=ltpk<line_sep>self.ltsk:bytes=ltsk<line_sep>self.atv_id:bytes=atv_id<line_sep>self.client_id:bytes=client_id<line_sep>self.type:AuthenticationType=self._get_auth_type()<block_end><def_stmt>_get_auth_type self<arrow>AuthenticationType<block_start><if_stmt>(self.ltpk<eq>b""<and>self.ltsk<eq>b""<and>self.atv_id<eq>b""<and>self.client_id<eq>b"")<block_start><return>AuthenticationType.Null<block_end><if_stmt>self.ltpk<eq>b"transient"<block_start><return>AuthenticationType.Transient<block_end><if_stmt>(self.ltpk<eq>b""<and>self.ltsk<ne>b""<and>self.atv_id<eq>b""<and>self.client_id<ne>b"")<block_start><return>AuthenticationType.Legacy<block_end><if_stmt>self.ltpk<and>self.ltsk<and>self.atv_id<and>self.client_id<block_start><return>AuthenticationType.HAP<block_end><raise>exceptions.InvalidCredentialsError("invalid credentials type")<block_end><def_stmt>__eq__ self other:object<arrow>bool<block_start>"""Return if two instances of HapCredentials are equal."""<if_stmt>isinstance(other HapCredentials)<block_start><return>str(other)<eq>str(self)<block_end><return><false><block_end><def_stmt>__str__ self<arrow>str<block_start>"""Return a string representation of credentials."""<line_sep><return>":".join([binascii.hexlify(self.ltpk).decode("utf-8") binascii.hexlify(self.ltsk).decode("utf-8") binascii.hexlify(self.atv_id).decode("utf-8") binascii.hexlify(self.client_id).decode("utf-8") ])<block_end><block_end><class_stmt>PairSetupProcedure<block_start>"""Perform pair setup procedure to authenticate a new device."""<async_keyword><def_stmt>start_pairing self<arrow><none><block_start>"""Start the pairing process. This method will show the expected PIN on screen. """<block_end><async_keyword><def_stmt>finish_pairing self username:str pin_code:int<arrow>HapCredentials<block_start>"""Finish pairing process. A username and the PIN code (usually shown on screen) must be provided. 
"""<block_end><block_end><class_stmt>PairVerifyProcedure<block_start>"""Verify if credentials are valid and derive encryption keys."""<async_keyword><def_stmt>verify_credentials self<arrow>bool<block_start>"""Verify if credentials are valid and returns True if keys are generated."""<block_end><def_stmt>encryption_keys self salt:str output_info:str input_info:str<arrow>Tuple[str str]<block_start>"""Return derived encryption keys."""<block_end><block_end>NO_CREDENTIALS=HapCredentials()<line_sep>TRANSIENT_CREDENTIALS=HapCredentials(b"transient")<def_stmt>parse_credentials detail_string:Optional[str]<arrow>HapCredentials<block_start>"""Parse a string represention of HapCredentials."""<if_stmt>detail_string<is><none><block_start><return>NO_CREDENTIALS<block_end>split=detail_string.split(":")<line_sep># Compatibility with "legacy credentials" used by AirPlay where seed is stored # as LTSK and identifier as client_id (others are empty). <if_stmt>len(split)<eq>2<block_start>client_id=binascii.unhexlify(split[0])<line_sep>ltsk=binascii.unhexlify(split[1])<line_sep><return>HapCredentials(b"" ltsk b"" client_id)<block_end><if_stmt>len(split)<eq>4<block_start>ltpk=binascii.unhexlify(split[0])<line_sep>ltsk=binascii.unhexlify(split[1])<line_sep>atv_id=binascii.unhexlify(split[2])<line_sep>client_id=binascii.unhexlify(split[3])<line_sep><return>HapCredentials(ltpk ltsk atv_id client_id)<block_end><raise>exceptions.InvalidCredentialsError("invalid credentials: "+detail_string)<block_end>
<import_stmt>pytest<import_from_stmt>tartiflette Resolver create_engine<line_sep>_SDL=""" type Query { hello(name: String = "Unknown"): String bye(name: String! = "Unknown"): String } """<line_sep>@pytest.fixture(scope="module")<async_keyword><def_stmt>ttftt_engine <block_start>@Resolver("Query.hello" schema_name="test_issue213")<async_keyword><def_stmt>resolve_query_hello parent args ctx info<block_start><return>args.get("name")<block_end><class_stmt>QueryByResolver<block_start><async_keyword><def_stmt>__call__ self parent args ctx info<block_start><return>args.get("name")<block_end><block_end>Resolver("Query.bye" schema_name="test_issue213")(QueryByResolver())<line_sep><return><await>create_engine(sdl=_SDL schema_name="test_issue213")<block_end>@[email protected]("query,variables,expected" [# Without variables (""" query { hello } """ <none> {"data":{"hello":"Unknown"}} ) (""" query { hello(name: "Name") } """ <none> {"data":{"hello":"Name"}} ) (""" query { hello(name: null) } """ <none> {"data":{"hello":<none>}} ) (""" query { bye } """ <none> {"data":{"bye":"Unknown"}} ) (""" query { bye(name: "Name") } """ <none> {"data":{"bye":"Name"}} ) (""" query { bye(name: null) } """ <none> {"data":<none> "errors":[{"message":"Argument < name > of non-null type < String! > must not be null." "path":["bye"] "locations":[{"line":3 "column":19}] "extensions":{"rule":"5.6.1" "spec":"June 2018" "details":"https://graphql.github.io/graphql-spec/June2018/#sec-Values-of-Correct-Type" "tag":"values-of-correct-type" } }] } ) # With variables (""" query ($name: String) { hello(name: $name) } """ {} {"data":{"hello":"Unknown"}} ) (""" query ($name: String) { hello(name: $name) } """ {"name":"Name"} {"data":{"hello":"Name"}} ) (""" query ($name: String) { hello(name: $name) } """ {"name":<none>} {"data":{"hello":<none>}} ) (""" query ($name: String) { bye(name: $name) } """ {} {"data":{"bye":"Unknown"}} ) (""" query ($name: String) { bye(name: $name) } """ {"name":"Name"} {"data":{"bye":"Name"}} ) (""" query ($name: String) { bye(name: $name) } """ {"name":<none>} {"data":{"bye":<none>} "errors":[{"message":"Argument < name > of non-null type < String! > must not be null." "path":["bye"] "locations":[{"line":3 "column":25}] }] } ) (""" query ($name: String!) { bye(name: $name) } """ {"name":<none>} {"data":<none> "errors":[{"message":"Variable < $name > of non-null type < String! > must not be null." "path":<none> "locations":[{"line":2 "column":20}] }] } ) ] )<async_keyword><def_stmt>test_issue213 query variables expected ttftt_engine<block_start><assert_stmt><await>ttftt_engine.execute(query variables=variables)<eq>expected<block_end>
import json

from pynodered import node_red

from platypush.context import get_plugin


# noinspection PyUnusedLocal
@node_red(name='run', title='run', category='platypush', description='Run a platypush action')
def run(node, msg):
    msg = msg['payload']
    if isinstance(msg, bytes):
        msg = msg.decode()
    if isinstance(msg, str):
        msg = json.loads(msg)

    assert isinstance(msg, dict) and 'action' in msg
    if 'type' not in msg:
        msg['type'] = 'request'

    plugin_name = '.'.join(msg['action'].split('.')[:-1])
    action_name = msg['action'].split('.')[-1]
    plugin = get_plugin(plugin_name)
    action = getattr(plugin, action_name)
    args = msg.get('args', {})

    response = action(**args)
    if response.errors:
        raise response.errors[0]

    msg['payload'] = response.output
    return msg


# vim:sw=4:ts=4:et:
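# A hedged illustration of the message shape the 'run' node above expects from
# Node-RED: msg['payload'] carries a JSON request with an 'action' and optional
# 'args'. The action name and arguments here are examples, not fixed values.
example_msg = {
    'payload': json.dumps({
        'type': 'request',
        'action': 'light.hue.on',            # '<plugin_name>.<action_name>'
        'args': {'groups': ['Living Room']},  # keyword arguments for the action
    })
}
# run(node, example_msg) would resolve the 'light.hue' plugin, call its 'on'
# action with the given args, and put the action's output back on msg['payload'].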
<import_from_stmt>tessagon.core.tile_generator TileGenerator<import_from_stmt>tessagon.core.abstract_tile AbstractTile<class_stmt>RotateTileGenerator(TileGenerator)# This generates tiles that are rotated from a regular # grid arrangement. <block_start><def_stmt>__init__ self tessagon **kwargs<block_start>super().__init__(tessagon **kwargs)<line_sep>self.rot_factor=kwargs['rot_factor']<line_sep>self.color_pattern=kwargs.get('color_pattern')<or><none><line_sep># Rot tiles are not tiles, they are a collection of tiles. # They generate interior tiles ((rot_factor - 1)^2 of them) and # up to 2 * rot_factor boundary tiles that are shared with neighbors # (if they exist). # Maximum tiles generated per rot_tile is rot_factor^2 + 1 tiles # With this in mind, you'll want to set u_num and v_num lower than # you would with the grid tile generator self.rot_tiles=<none><line_sep>self.id_prefix='rot_tiles'<block_end><def_stmt>create_tiles self<block_start>self.rot_tiles=self.initialize_tiles(RotTile rot_factor=self.rot_factor color_pattern=self.color_pattern)<line_sep>self.initialize_neighbors(self.rot_tiles)<line_sep>self.initialize_interiors()<line_sep>self.initialize_boundaries()<line_sep>self.calculate_boundary_neighbors()<line_sep><return>self.calculate_rot_tiles()<block_end><def_stmt>initialize_interiors self<block_start><for_stmt>rot_tile [j<for>i self.rot_tiles<for>j i]<block_start>rot_tile.initialize_interior()<block_end><block_end><def_stmt>initialize_boundaries self<block_start><for_stmt>rot_tile [j<for>i self.rot_tiles<for>j i]<block_start>rot_tile.initialize_boundary()<block_end><block_end><def_stmt>calculate_boundary_neighbors self<block_start><for_stmt>rot_tile [j<for>i self.rot_tiles<for>j i]<block_start>rot_tile.calculate_boundary_neighbors()<block_end><block_end><def_stmt>calculate_rot_tiles self<block_start>tiles=[]<for_stmt>rot_tile [j<for>i self.rot_tiles<for>j i]<block_start>tiles<augadd>rot_tile.create_tiles()<block_end><return>tiles<block_end><block_end># This is both a kind of tile and a tile generator # It hurts my brain thinking about this stuff <class_stmt>RotTile(AbstractTile)<block_start><def_stmt>__init__ self tessagon **kwargs<block_start>super().__init__(tessagon **kwargs)<line_sep>self.n=kwargs['rot_factor']<line_sep># the interior and each boundary is a collection of tiles self.interior=<none><line_sep>self.boundary={'left':<none> 'right':<none> 'top':<none> 'bottom':<none>}<line_sep>self.interior_corners=<none><line_sep>self.color_pattern=kwargs.get('color_pattern')<or><none><line_sep>self.u_num=self.tessagon.tile_generator.u_num<line_sep># We'll use these constants a lot n2_p1=self.n<power>2+1.0<line_sep>self.c1=1.0/n2_p1<line_sep>self.c2=self.n/n2_p1<line_sep>self.c3=1.0-self.c2<line_sep>self.c4=1.0-self.c1<line_sep>self.tiles=[]<block_end><def_stmt>initialize_interior self<block_start>self.interior_corners=[self.blend(self.c2 self.c1) self.blend(self.c4 self.c2) self.blend(self.c1 self.c3) self.blend(self.c3 self.c4)]<if_stmt>self.n<l>2<block_start><return><block_end>offset=self.basic_offset(self.fingerprint)<line_sep>generator=TileGenerator(self.tessagon corners=self.interior_corners u_num=self.n-1 v_num=self.n-1 u_cyclic=<false> v_cyclic=<false> id_prefix=self.id+'.interior' color_pattern=self.color_pattern fingerprint_offset=offset)<line_sep>self.interior=generator.initialize_tiles(self.tessagon.__class__.tile_class)<line_sep>generator.initialize_neighbors(self.interior)<line_sep>self.tiles<augadd>self._flatten_list(self.interior)<block_end><def_stmt>basic_offset 
self fingerprint<block_start><return>[fingerprint[0]<times>self.n+fingerprint[1]+1 self.u_num-fingerprint[0]+fingerprint[1]<times>self.n]<block_end><def_stmt>create_tiles self<block_start><return>self.tiles<block_end><def_stmt>initialize_boundary self<block_start>self.initialize_left_boundary(self.id+".boundary['left']")<line_sep>self.initialize_right_boundary(self.id+".boundary['right']")<line_sep>self.initialize_top_boundary(self.id+".boundary['top']")<line_sep>self.initialize_bottom_boundary(self.id+".boundary['bottom']")<block_end><def_stmt>initialize_left_boundary self id_prefix<block_start><if_stmt><not>self.boundary['left']<block_start>tile=self.get_neighbor_tile(['left'])<if_stmt>tile<block_start>corners=[self.blend(0 0) self.blend(self.c2 self.c1) self.blend(self.c3-1.0 self.c4) self.blend(0 1)]<line_sep>offset=self.basic_offset(self.fingerprint)<line_sep>offset[0]<augsub>1<line_sep>generator=TileGenerator(self.tessagon corners=corners u_num=1 v_num=self.n u_cyclic=<false> v_cyclic=<false> id_prefix=id_prefix color_pattern=self.color_pattern fingerprint_offset=offset)<line_sep>tiles=generator.initialize_tiles(self.tessagon.tile_class)<line_sep>generator.initialize_neighbors(tiles)<line_sep>self.boundary['left']=tiles<line_sep>tile.boundary['right']=tiles<line_sep>self.tiles<augadd>self._flatten_list(tiles)<block_end><block_end><block_end><def_stmt>initialize_bottom_boundary self id_prefix<block_start><if_stmt><not>self.boundary['bottom']<block_start>tile=self.get_neighbor_tile(['bottom'])<if_stmt>tile<block_start>corners=[self.blend(self.c1 self.c3-1.0) self.blend(1 0) self.blend(0 0) self.blend(self.c4 self.c2)]<line_sep>offset=self.basic_offset(self.fingerprint)<line_sep>offset[0]<augsub>1<line_sep>offset[1]<augsub>1<line_sep>generator=TileGenerator(self.tessagon corners=corners u_num=self.n v_num=1 u_cyclic=<false> v_cyclic=<false> id_prefix=id_prefix color_pattern=self.color_pattern fingerprint_offset=offset)<line_sep>tiles=generator.initialize_tiles(self.tessagon.tile_class)<line_sep>generator.initialize_neighbors(tiles)<line_sep>self.boundary['bottom']=tiles<line_sep>tile.boundary['top']=tiles<line_sep>self.tiles<augadd>self._flatten_list(tiles)<block_end><block_end><block_end><def_stmt>initialize_right_boundary self id_prefix<block_start><if_stmt><not>self.boundary['right']<block_start>tile=self.get_neighbor_tile(['right'])<if_stmt>tile<block_start>tile.initialize_left_boundary(id_prefix)<block_end><block_end><block_end><def_stmt>initialize_top_boundary self id_prefix<block_start><if_stmt><not>self.boundary['top']<block_start>tile=self.get_neighbor_tile(['top'])<if_stmt>tile<block_start>tile.initialize_bottom_boundary(id_prefix)<block_end><block_end><block_end><def_stmt>calculate_boundary_neighbors self<block_start>self.calculate_left_boundary_neighbors()<line_sep>self.calculate_right_boundary_neighbors()<line_sep>self.calculate_top_boundary_neighbors()<line_sep>self.calculate_bottom_boundary_neighbors()<block_end><def_stmt>calculate_left_boundary_neighbors self<block_start><if_stmt>self.boundary['left']<block_start><for_stmt>i 
range(self.n-1)<block_start>boundary_tile=self.boundary['left'][0][i]<line_sep>other_tile=<none><if_stmt>self.n<g>1<block_start>other_tile=self.interior[0][i]<block_end><if_stmt>other_tile<block_start>boundary_tile.neighbors['right']=other_tile<line_sep>other_tile.neighbors['left']=boundary_tile<block_end><block_end><if_stmt>self.boundary['top']<block_start>boundary_tile=self.boundary['left'][0][self.n-1]<line_sep>other_tile=self.boundary['top'][0][0]<line_sep>boundary_tile.neighbors['right']=other_tile<line_sep>other_tile.neighbors['left']=boundary_tile<block_end><block_end><block_end><def_stmt>calculate_bottom_boundary_neighbors self<block_start><if_stmt>self.boundary['bottom']<block_start><for_stmt>i range(self.n-1)<block_start>boundary_tile=self.boundary['bottom'][i+1][0]<line_sep>other_tile=<none><if_stmt>self.n<g>1<block_start>other_tile=self.interior[i][0]<block_end><if_stmt>other_tile<block_start>boundary_tile.neighbors['top']=other_tile<line_sep>other_tile.neighbors['bottom']=boundary_tile<block_end><block_end><if_stmt>self.boundary['left']<block_start>boundary_tile=self.boundary['bottom'][0][0]<line_sep>other_tile=self.boundary['left'][0][0]<line_sep>boundary_tile.neighbors['top']=other_tile<line_sep>other_tile.neighbors['bottom']=boundary_tile<block_end><block_end><block_end><def_stmt>calculate_right_boundary_neighbors self<block_start><if_stmt>self.boundary['right']<block_start><for_stmt>i range(self.n-1)<block_start>boundary_tile=self.boundary['right'][0][i+1]<line_sep>other_tile=<none><if_stmt>self.n<g>1<block_start>other_tile=self.interior[self.n-2][i]<block_end><if_stmt>other_tile<block_start>boundary_tile.neighbors['left']=other_tile<line_sep>other_tile.neighbors['right']=boundary_tile<block_end><block_end><if_stmt>self.boundary['bottom']<block_start>boundary_tile=self.boundary['right'][0][0]<line_sep>other_tile=self.boundary['bottom'][self.n-1][0]<line_sep>boundary_tile.neighbors['left']=other_tile<line_sep>other_tile.neighbors['right']=boundary_tile<block_end><block_end><block_end><def_stmt>calculate_top_boundary_neighbors self<block_start><if_stmt>self.boundary['top']<block_start><for_stmt>i range(self.n-1)<block_start>boundary_tile=self.boundary['top'][i][0]<line_sep>other_tile=<none><if_stmt>self.n<g>1<block_start>other_tile=self.interior[i][self.n-2]<block_end><if_stmt>other_tile<block_start>boundary_tile.neighbors['bottom']=other_tile<line_sep>other_tile.neighbors['top']=boundary_tile<block_end><block_end><if_stmt>self.boundary['right']<block_start>boundary_tile=self.boundary['top'][self.n-1][0]<line_sep>other_tile=self.boundary['right'][0][self.n-1]<line_sep>boundary_tile.neighbors['bottom']=other_tile<line_sep>other_tile.neighbors['top']=boundary_tile<block_end><block_end><block_end><def_stmt>_flatten_list self l<block_start><return>[item<for>sublist l<for>item sublist]<block_end><block_end>
# Copyright 2022 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for partitioning."""<import_stmt>functools<import_stmt>itertools<import_stmt>logging<import_stmt>re<import_from_stmt>unittest mock<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>numpy<as>np<import_from_stmt>vmoe partitioning<line_sep>PartitionSpec=partitioning.PartitionSpec<class_stmt>PartitioningTest(parameterized.TestCase)<block_start>@parameterized.parameters((0 <true>) (1 <false>) (2 <true>))<def_stmt>test_process_has_contiguous_device_slice self process_index expected<block_start><def_stmt>mk_dev process_index<block_start><return>_make_device(process_index=process_index)<block_end>devices=np.asarray([[mk_dev(0) mk_dev(0) mk_dev(1)] [mk_dev(0) mk_dev(0) mk_dev(2)] [mk_dev(0) mk_dev(0) mk_dev(1)] ])<line_sep>self.assertEqual(partitioning.process_has_contiguous_device_slice(devices process_index) expected)<block_end>@parameterized.named_parameters(('false' [[0 0 1] [0 0 2] [0 0 1]] <false>) ('true' [[0 0 0] [0 0 0] [1 1 1]] <true>) )<def_stmt>test_processes_have_contiguous_device_slices self devices expected<block_start><def_stmt>mk_dev process_index<block_start><return>_make_device(process_index=process_index)<block_end>devices=np.asarray(devices)<line_sep>devices=np.vectorize(mk_dev otypes=[np.object])(devices)<line_sep>self.assertEqual(partitioning.processes_have_contiguous_device_slices(devices) expected)<block_end>@parameterized.parameters(('other') ('tpu'))<def_stmt>test_get_auto_logical_mesh self platform<block_start>"""Tests that the right auto_logical_mesh is run, based on the platform."""<line_sep>hardware_mesh=mock.MagicMock()<line_sep>device=_make_device(platform=platform)<with_stmt>mock.patch.object(partitioning f'get_hardware_mesh_{platform}' return_value=hardware_mesh)<block_start><with_stmt>mock.patch.object(partitioning f'get_auto_logical_mesh_{platform}')<as>mock_get<block_start>partitioning.get_auto_logical_mesh(2 [device])<line_sep>mock_get.assert_called_with(2 hardware_mesh)<block_end><block_end><block_end>@parameterized.named_parameters(('2' 2 (2 1)) ('4' 4 (4 1)) ('8' 8 (4 2)) )@mock.patch.object(partitioning 'get_logical_mesh')<def_stmt>test_get_auto_logical_mesh_other self num_partitions expected_tuple get_logical_mesh_mock<block_start>"""Tests that each axis is partitioned as expected on devices != TPU."""<line_sep>hardware_mesh=np.empty((4 8))<line_sep>partitioning.get_auto_logical_mesh_other(num_partitions hardware_mesh)<line_sep>get_logical_mesh_mock.assert_called_with(expected_tuple hardware_mesh)<block_end><def_stmt>test_get_auto_logical_mesh_other_error self<block_start>"""Tests that an exception is raised if the number of partitions is not supported."""<line_sep>hardware_mesh=np.empty((3 5))<with_stmt>self.assertRaisesRegex(ValueError 'The hardware mesh with shape')<block_start>partitioning.get_auto_logical_mesh_other(2 hardware_mesh)<block_end><block_end>@parameterized.named_parameters(('v3_2' 
2 (2 2 4 1) (1 2 1 1)) ('v3_4' 4 (2 2 4 1) (1 2 2 1)) ('v3_8' 8 (2 2 4 1) (1 2 4 1)) ('v3_16' 16 (2 2 4 1) (2 2 4 1)) ('v4_2' 2 (2 2 4 2) (1 1 1 2)) ('v4_4' 4 (2 2 4 2) (1 1 2 2)) ('v4_8' 8 (2 2 4 2) (1 1 4 2)) ('v4_16' 16 (2 2 4 2) (1 2 4 2)) ('v4_32' 32 (2 2 4 2) (2 2 4 2)) )@mock.patch.object(partitioning 'get_logical_mesh')<def_stmt>test_get_auto_logical_mesh_tpu self num_partitions hardware_mesh_shape expected_tuple get_logical_mesh_mock<block_start>"""Tests that each axis is partitioned as expected on TPU devices."""<line_sep>hardware_mesh=np.empty(hardware_mesh_shape)<line_sep>partitioning.get_auto_logical_mesh_tpu(num_partitions hardware_mesh)<line_sep>get_logical_mesh_mock.assert_called_with(expected_tuple hardware_mesh)<block_end><def_stmt>test_get_auto_logical_mesh_tpu_error self<block_start>"""Tests that an exception is raised if the number of partitions is not supported."""<line_sep>hardware_mesh=np.empty((3 5 7 9))<with_stmt>self.assertRaisesRegex(ValueError 'The hardware mesh with shape')<block_start>partitioning.get_auto_logical_mesh_tpu(6 hardware_mesh)<block_end><block_end>@parameterized.named_parameters(('cpu0' (0 0) (0 0)) ('cpu1' (23 5) (3 5)) )@mock.patch.object(partitioning.jax 'local_device_count' return_value=4)<def_stmt>test_get_device_coords_other self device_attrs expected_coord _<block_start>"""Tests that the device coordinates are good for devices other than TPU."""<line_sep>device_id,process_id=device_attrs<line_sep>device=_make_device(id=device_id process_index=process_id platform='cpu')<line_sep>self.assertTupleEqual(partitioning.get_device_coords_other(device) expected_coord)<block_end>@parameterized.named_parameters(('tpu0' (0 0 0 0)) ('tpu1' (0 1 2 3)) )<def_stmt>test_get_device_coords_tpu self expected_coord<block_start>"""Tests that the device coordinates are good for TPU devices."""<line_sep>core_on_chip,x,y,z=expected_coord<line_sep>device=_make_device(core_on_chip=core_on_chip coords=(x y z) platform='tpu')<line_sep>self.assertTupleEqual(partitioning.get_device_coords_tpu(device) expected_coord)<block_end><def_stmt>test_get_hardware_mesh_local_shape self<block_start>local_devices=[# Local devices presented in arbitrary order. 
_make_device(core_on_chip=0 coords=(2 2 0) platform='tpu') _make_device(core_on_chip=0 coords=(2 3 0) platform='tpu') _make_device(core_on_chip=0 coords=(3 2 0) platform='tpu') _make_device(core_on_chip=0 coords=(3 1 0) platform='tpu') _make_device(core_on_chip=0 coords=(3 3 0) platform='tpu') _make_device(core_on_chip=0 coords=(2 1 0) platform='tpu') ]<line_sep>shape=partitioning.get_hardware_mesh_local_shape(local_devices)<line_sep>expected_shape=(1 2 3 1)<line_sep>self.assertEqual(shape expected_shape)<block_end>@mock.patch.object(partitioning.jax 'local_device_count' return_value=2)<def_stmt>test_get_hardware_mesh_other self _<block_start>"""Tests the hardware mesh (with 6 total CPU devices in 2 processes)."""<line_sep>devices=[]<for_stmt>process_index range(3)<block_start><for_stmt>device_id range(process_index<times>2 process_index<times>2+2)<block_start>devices.append(_make_device(id=device_id process_index=process_index platform='cpu'))<block_end><block_end>hardware_mesh=partitioning.get_hardware_mesh_other(devices)<line_sep>expected_hardware_mesh=np.array([[devices[0] devices[2] devices[4]] [devices[1] devices[3] devices[5]]])<line_sep>np.testing.assert_array_equal(hardware_mesh expected_hardware_mesh)<block_end><def_stmt>test_get_hardware_mesh_tpu self<block_start>"""Tests the hardware mesh (with 12 TPU devices, in a (2, 3, 1, 2) mesh)."""<line_sep>devices=[]<for_stmt>z,y,x,core_on_chip itertools.product(range(2) range(3) range(1) range(2))<block_start>devices.append(_make_device(core_on_chip=core_on_chip coords=(x y z) platform='tpu'))<block_end>hardware_mesh=partitioning.get_hardware_mesh_tpu(devices)<line_sep>expected_hardware_mesh=np.array([# core_on_chip=0. [[[devices[0] devices[6]] [devices[2] devices[8]] [devices[4] devices[10]]]] # core_on_chip=1. [[[devices[1] devices[7]] [devices[3] devices[9]] [devices[5] devices[11]]]]] dtype=np.object)<line_sep>np.testing.assert_array_equal(hardware_mesh expected_hardware_mesh)<block_end><def_stmt>test_get_logical_mesh_default self<block_start>"""Tests the logical mesh with a 2x4 hardware mesh."""<line_sep># Note: The values in hardware_mesh would typically be Devices, but these # are fine for testing. This is a 2x4 hardware mesh. hardware_mesh=np.array([[1 2 3 4] # partition_ids: 0 0 1 1 [5 6 7 8]])<line_sep># 2 2 3 3 partitions,replicas=(2 2) (1 2)<line_sep>mesh=partitioning.get_logical_mesh_default(partitions replicas hardware_mesh)<line_sep>self.assertIsInstance(mesh partitioning.maps.Mesh)<line_sep>np.testing.assert_array_equal(mesh.devices [[1 2] [3 4] [5 6] [7 8]])<line_sep>self.assertTupleEqual(mesh.axis_names ('expert' 'replica'))<block_end><def_stmt>test_get_logical_mesh_tile_by_process self# Note: The values in hardware_mesh would typically be Devices, but these # are fine for testing. This is a 2x4 hardware mesh. 
# partition_ids: 0 0 1 1 | process_ids: 0 1 2 3 # 2 2 3 3 | 0 1 2 3 <block_start>hardware_mesh=np.asarray([[1 2 3 4] [5 6 7 8]])<line_sep>partitions,replicas=(2 2) (1 2)<line_sep>hardware_mesh_local_shape=(2 1)<line_sep>mesh=partitioning.get_logical_mesh_tile_by_process(partitions replicas hardware_mesh hardware_mesh_local_shape)<line_sep>self.assertIsInstance(mesh partitioning.maps.Mesh)<line_sep>np.testing.assert_array_equal(mesh.devices [[1 2] [5 6] [3 4] [7 8]])<line_sep>self.assertTupleEqual(mesh.axis_names ('expert' 'replica'))<block_end><def_stmt>test_get_logical_mesh_tile_by_process_raises self<block_start>hardware_mesh=np.zeros((3 3))<line_sep>partitions,replicas=(3 1) (1 3)<line_sep>hardware_mesh_local_shape=(1 2)<with_stmt>self.assertRaises(ValueError)<block_start>partitioning.get_logical_mesh_tile_by_process(partitions replicas hardware_mesh hardware_mesh_local_shape)<block_end><block_end>@mock.patch.object(partitioning 'processes_have_contiguous_device_slices' return_value=<false>)@mock.patch.object(partitioning 'get_hardware_mesh_local_shape')<def_stmt>test_get_logical_mesh self mock_get_hardware_mesh_local_shape _# Note: The values in hardware_mesh would typically be Devices, but these # are fine for testing. This is a 2x4 hardware mesh. # partition_ids: 0 1 2 3 | process_ids: 0 0 2 3 # 0 1 2 3 | 1 1 2 3 <block_start>hardware_mesh=np.asarray([[1 2 3 4] [5 6 7 8]])<line_sep>mock_get_hardware_mesh_local_shape.return_value=(2 1)<line_sep>mesh=partitioning.get_logical_mesh((2 2) hardware_mesh)<line_sep>np.testing.assert_array_equal(mesh.devices [[1 2] [5 6] [3 4] [7 8]])<block_end><def_stmt>test_log_logical_mesh_tpu self<block_start>mk_dev=functools.partial(_make_device platform='tpu')<line_sep>devices=[[mk_dev(core_on_chip=0 coords=(0 0 0) process_index=0) mk_dev(core_on_chip=1 coords=(0 0 0) process_index=1) mk_dev(core_on_chip=0 coords=(10 0 0) process_index=10) mk_dev(core_on_chip=1 coords=(10 0 0) process_index=11) ] [mk_dev(core_on_chip=0 coords=(0 100 0) process_index=1) mk_dev(core_on_chip=1 coords=(0 100 0) process_index=2) mk_dev(core_on_chip=0 coords=(10 1 0) process_index=3) mk_dev(core_on_chip=1 coords=(10 1 0) process_index=4) ] ]<line_sep>mesh=partitioning.Mesh(devices=np.asarray(devices) axis_names=('a' 'b'))<line_sep>logger=logging.getLogger('foo')<with_stmt>self.assertLogs(logger)<as>cm<block_start>partitioning.log_logical_mesh(mesh logger=logger)<block_end>self.assertRegex(cm.output[0] re.escape("Logical device mesh has axis_names = ('a', 'b')"))<line_sep>self.assertRegex(cm.output[1] re.escape('Logical device mesh has shape = (2, 4)'))<line_sep>self.assertRegex(cm.output[2] 'Logical device mesh:')<line_sep>self.assertRegex(cm.output[3] '\\+[-]+\\+')<line_sep># pylint: disable=line-too-long self.assertRegex(cm.output[4] re.escape('| (0, 0, 0, 0)[ 0] (1, 0, 0, 0)[ 1] (0, 10, 0, 0)[10] (1, 10, 0, 0)[11] |'))<line_sep>self.assertRegex(cm.output[5] re.escape('| (0, 0, 100, 0)[ 1] (1, 0, 100, 0)[ 2] (0, 10, 1, 0)[ 3] (1, 10, 1, 0)[ 4] |'))<line_sep># pylint: enable=line-too-long self.assertRegex(cm.output[6] '\\+[-]+\\+')<block_end>@mock.patch.object(jax 'local_device_count' return_value=4)<def_stmt>test_log_logical_mesh_single_axis self unused_mock<block_start>devices=[_make_device(id=0 process_index=0 platform='cpu') _make_device(id=10 process_index=10 platform='cpu')]<line_sep>mesh=partitioning.Mesh(devices=np.asarray(devices) axis_names=('a' 
))<line_sep>logger=logging.getLogger('foo')<with_stmt>self.assertLogs(logger)<as>cm<block_start>partitioning.log_logical_mesh(mesh logger=logger)<block_end>self.assertRegex(cm.output[0] re.escape("Logical device mesh has axis_names = ('a',)"))<line_sep>self.assertRegex(cm.output[1] re.escape('Logical device mesh has shape = (2,)'))<line_sep>self.assertRegex(cm.output[2] 'Logical device mesh:')<line_sep>self.assertRegex(cm.output[3] '\\+[-]+\\+')<line_sep>self.assertRegex(cm.output[4] re.escape('| (0, 0)[ 0] |'))<line_sep>self.assertRegex(cm.output[5] re.escape('| (2, 10)[10] |'))<line_sep>self.assertRegex(cm.output[6] '\\+[-]+\\+')<block_end><def_stmt>test_tree_global_shape self<block_start>"""Tests that global shape of arrays is obtained correctly."""<line_sep># Note: see _make_tree_axis_resources_mesh_test_data for additional details. tree,axis_resources,mesh=_make_tree_axis_resources_mesh_test_data()<line_sep>expected_global_aval={'v':jax.ShapedArray(shape=(5 5) dtype=jnp.float32) 'w':jax.ShapedArray(shape=(4<times>5 5) dtype=jnp.float32) 'x':jax.ShapedArray(shape=(4<times>2<times>5 5) dtype=jnp.float32) 'y':jax.ShapedArray(shape=(4<times>5 2<times>5) dtype=jnp.float32) 'z':jax.ShapedArray(shape=(4<times>3<times>5 2<times>5) dtype=jnp.float32) }<line_sep>global_aval=partitioning.tree_global_shape(tree axis_resources mesh)<line_sep>self.assertDictEqual(global_aval expected_global_aval)<block_end><def_stmt>test_tree_global_shape_raises_structs_not_match self<block_start>mesh=partitioning.Mesh(devices=np.zeros((4 4)) axis_names=('a' 'b'))<with_stmt>self.assertRaisesRegex(ValueError 'The tree structs do not match')<block_start>partitioning.tree_global_shape({'a':1 'b':2} {'c':PartitionSpec()} mesh)<block_end><block_end><def_stmt>test_tree_global_shape_raises_wrong_leaves self<block_start>mesh=partitioning.Mesh(devices=np.zeros((4 4)) axis_names=('a' 'b'))<with_stmt>self.assertRaisesRegex(ValueError 'the input tree must have')<block_start>partitioning.tree_global_shape({'a':1} {'a':PartitionSpec()} mesh)<block_end><block_end><block_end><class_stmt>ParsePartitionSpecTest(parameterized.TestCase)<block_start>@parameterized.named_parameters(('_none' <none> PartitionSpec()) ('_string' 'a' PartitionSpec('a')) ('_tuple' ('a' ('b' 'c')) PartitionSpec('a' ('b' 'c'))) ('_partition_spec' PartitionSpec('a') PartitionSpec('a')) )<def_stmt>test self spec expected<block_start>self.assertEqual(partitioning.parse_partition_spec(spec) expected)<block_end><block_end><class_stmt>TreeAxisResourcesFromRegexesTest(parameterized.TestCase)<block_start>@parameterized.named_parameters(('_empty_regexes' {'a':1 'b':2 'c':3} [] {'a':PartitionSpec() 'b':PartitionSpec() 'c':PartitionSpec()}) ('_single_string' {'a':1 'b':2 'c':3} [('b' 'x')] {'a':PartitionSpec() 'b':PartitionSpec('x') 'c':PartitionSpec()}) ('_first_match' {'a':1 'bb':2 'c':3} [('b' ('x' )) ('bb' ('x' 'y'))] {'a':PartitionSpec() 'bb':PartitionSpec('x') 'c':PartitionSpec()}) )<def_stmt>test self tree axis_resources_regexes expected<block_start>output=partitioning.tree_axis_resources_from_regexes(tree=tree axis_resources_regexes=axis_resources_regexes)<line_sep>self.assertEqual(output expected)<block_end><block_end><def_stmt>_make_device **kwargs<block_start>"""Returns a new mocked device."""<line_sep>device=mock.MagicMock(partitioning.Device)<for_stmt>key,value kwargs.items()<block_start>setattr(device key value)<block_end><return>device<block_end><def_stmt>_make_tree_axis_resources_mesh_test_data # Mesh of (4, 3, 2) devices. 
Each device resides in a different process to # simplify the calculation of global shapes of the arrays. <block_start>devices=np.asarray([_make_device(process_index=idx id=idx)<for>idx range(24)] dtype=np.object).reshape(4 3 2)<line_sep>mesh=partitioning.Mesh(devices axis_names=('a' 'b' 'c'))<line_sep># These shapes are those of the arrays in the process running the code # (i.e. process_index=0). tree={'v':jax.ShapedArray(shape=(5 5) dtype=jnp.float32) 'w':jax.ShapedArray(shape=(5 5) dtype=jnp.float32) 'x':jax.ShapedArray(shape=(5 5) dtype=jnp.float32) 'y':jax.ShapedArray(shape=(5 5) dtype=jnp.float32) 'z':jax.ShapedArray(shape=(5 5) dtype=jnp.float32) }<line_sep>axis_resources={# Array 'v' is not partitioned, each device holds a replica of this. # Thus, the global shape is (5, 5). 'v':<none> # Array 'w' has its first axis partitioned in 4 chunks across the # axis 'a' of the logical mesh. Thus, its global shape is (4 * 5, 5). 'w':PartitionSpec('a') # Array 'x' has its first axis partitioned in 4 * 2 chunks across the # axes 'a' and 'c' of the logical mesh. Thus its global shape is # (4 * 2 * 5, 5). 'x':PartitionSpec(('a' 'c') ) # Array 'y' has its first axis partitioned in 4 chunks (across logical # axis 'a') and the second axis partitioned in 2 chunks (across logical # axis 'c'). Thus its global shape is (4 * 5, 2 * 5). 'y':PartitionSpec('a' 'c') # Array 'z' has its first axis partitioned in 4 * 3 chunks, and the # second axis partitioned in 2 chunks. Its global shape is # (4 * 3 * 5, 2 * 5). 'z':PartitionSpec(('a' 'b') 'c') }<line_sep><return>tree axis_resources mesh<block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
# Third party modules
from locust import HttpUser, between, task


class MyWebsiteUser(HttpUser):
    wait_time = between(5, 15)

    @task
    def load_main(self):
        self.client.get("/")
import json
from typing import TYPE_CHECKING

import flask.testing
import pyquery


class Response:
    """A Response wraps a response from a testing Client."""

    def __init__(self, response):
        self.response = response

    @property
    def text(self):
        return self.response.data.decode(self.response.charset)

    @property
    def pq(self):
        return pyquery.PyQuery(self.text)

    @property
    def json(self):
        return json.loads(self.text)


if TYPE_CHECKING:
    ClientBase = flask.testing.FlaskClient[Response]
else:
    ClientBase = flask.testing.FlaskClient


class Client(ClientBase):
    """A Client wraps the client given by flask to add other utilities."""

    def open(self, *args, **kwargs):
        return Response(super().open(*args, **kwargs))
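# A minimal sketch of how this Client could be used with pytest. Setting
# test_client_class on the Flask app is standard Flask behaviour; the fixture
# name and the '/health' route are assumptions for illustration only.
import pytest
from flask import Flask


@pytest.fixture
def client():
    app = Flask(__name__)

    @app.route('/health')
    def health():
        return {'status': 'ok'}

    app.test_client_class = Client  # use the wrapper defined above
    with app.test_client() as test_client:
        yield test_client


def test_health(client):
    assert client.get('/health').json == {'status': 'ok'}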
# https://github.com/NVlabs/latentfusion/blob/master/latentfusion/three/utils.py
import torch
from torch.nn import functional as F


def farthest_points(data,
                    n_clusters: int,
                    dist_func=F.pairwise_distance,
                    return_center_indexes=True,
                    return_distances=False,
                    verbose=False,
                    init_center=True):
    """Performs farthest point sampling on data points.

    Args:
        data (torch.tensor): data points.
        n_clusters (int): number of clusters.
        dist_func (Callable): distance function that is used to compare two data points.
        return_center_indexes (bool): if True, returns the indexes of the center of clusters.
        return_distances (bool): if True, return distances of each point from centers.

    Returns clusters, [centers, distances]:
        clusters (torch.tensor): the cluster index for each element in data.
        centers (torch.tensor): the integer index of each center.
        distances (torch.tensor): closest distances of each point to any of the cluster centers.
    """
    if n_clusters >= data.shape[0]:
        if return_center_indexes:
            return (torch.arange(data.shape[0], dtype=torch.long),
                    torch.arange(data.shape[0], dtype=torch.long))
        return torch.arange(data.shape[0], dtype=torch.long)

    clusters = torch.full((data.shape[0],), fill_value=-1, dtype=torch.long)
    centers = torch.zeros(n_clusters, dtype=torch.long)

    if init_center:
        broadcasted_data = torch.mean(data, 0, keepdim=True).expand(data.shape[0], -1)
        distances = dist_func(broadcasted_data, data)
    else:
        distances = torch.full((data.shape[0],), fill_value=1e7, dtype=torch.float32)

    for i in range(n_clusters):
        center_idx = torch.argmax(distances)
        centers[i] = center_idx

        broadcasted_data = data[center_idx].unsqueeze(0).expand(data.shape[0], -1)
        new_distances = dist_func(broadcasted_data, data)
        distances = torch.min(distances, new_distances)
        clusters[distances == new_distances] = i
        if verbose:
            print("farthest points max distance : {}".format(torch.max(distances)))

    if return_center_indexes:
        if return_distances:
            return clusters, centers, distances
        return clusters, centers

    return clusters


def get_fps_and_center_torch(points, num_fps: int, init_center=True, dist_func=F.pairwise_distance):
    center = torch.mean(points, 0, keepdim=True)
    _, fps_inds = farthest_points(points, n_clusters=num_fps, dist_func=dist_func,
                                  return_center_indexes=True, init_center=init_center)
    fps_pts = points[fps_inds]
    return torch.cat([fps_pts, center], dim=0)
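# A small usage sketch (not from the original repo): sample 16 farthest points
# from a random point cloud and get them together with the centroid.
if __name__ == "__main__":
    points = torch.rand(1000, 3)  # 1000 random 3D points
    clusters, centers = farthest_points(points, n_clusters=16)
    print("selected indexes:", centers.tolist())
    # 16 sampled points plus the centroid -> shape (17, 3)
    print("points + centroid shape:", get_fps_and_center_torch(points, num_fps=16).shape)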
import logging

from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView

from zentral.contrib.osquery.forms import FileCategoryForm
from zentral.contrib.osquery.models import FileCategory

logger = logging.getLogger('zentral.contrib.osquery.views.file_categories')


class FileCategoryListView(PermissionRequiredMixin, ListView):
    permission_required = "osquery.view_filecategory"
    model = FileCategory

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["file_category_count"] = ctx["object_list"].count()
        return ctx


class CreateFileCategoryView(PermissionRequiredMixin, CreateView):
    permission_required = "osquery.add_filecategory"
    model = FileCategory
    form_class = FileCategoryForm


class FileCategoryView(PermissionRequiredMixin, DetailView):
    permission_required = "osquery.view_filecategory"
    model = FileCategory

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["configurations"] = list(self.object.configuration_set.all().order_by("name", "pk"))
        ctx["configuration_count"] = len(ctx["configurations"])
        return ctx


class UpdateFileCategoryView(PermissionRequiredMixin, UpdateView):
    permission_required = "osquery.change_filecategory"
    model = FileCategory
    form_class = FileCategoryForm


class DeleteFileCategoryView(PermissionRequiredMixin, DeleteView):
    permission_required = "osquery.delete_filecategory"
    model = FileCategory
    success_url = reverse_lazy("osquery:file_categories")
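# A hedged sketch of how these views might be routed. The exact URL patterns and
# names in zentral may differ; only the "osquery:file_categories" name is implied
# by DeleteFileCategoryView.success_url above.
from django.urls import path

app_name = "osquery"
urlpatterns = [
    path("file_categories/", FileCategoryListView.as_view(), name="file_categories"),
    path("file_categories/create/", CreateFileCategoryView.as_view(), name="create_file_category"),
    path("file_categories/<int:pk>/", FileCategoryView.as_view(), name="file_category"),
    path("file_categories/<int:pk>/update/", UpdateFileCategoryView.as_view(), name="update_file_category"),
    path("file_categories/<int:pk>/delete/", DeleteFileCategoryView.as_view(), name="delete_file_category"),
]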
"""Dataset implementations to save data for Kedro Experiment Tracking"""<line_sep>__all__=["MetricsDataSet" "JSONDataSet"]<import_from_stmt>contextlib suppress<with_stmt>suppress(ImportError)<block_start><import_from_stmt>kedro.extras.datasets.tracking.metrics_dataset MetricsDataSet<block_end><with_stmt>suppress(ImportError)<block_start><import_from_stmt>kedro.extras.datasets.tracking.json_dataset JSONDataSet<block_end>
<import_stmt>lz4.stream<import_stmt>pytest<import_stmt>sys<line_sep>_1KB=1024<line_sep>_1MB=_1KB<times>1024<line_sep>_1GB=_1MB<times>1024<def_stmt>compress x c_kwargs<block_start>c=[]<with_stmt>lz4.stream.LZ4StreamCompressor(**c_kwargs)<as>proc<block_start><for_stmt>start range(0 len(x) c_kwargs['buffer_size'])<block_start>chunk=x[start:start+c_kwargs['buffer_size']]<line_sep>block=proc.compress(chunk)<line_sep>c.append(block)<block_end><block_end><if_stmt>c_kwargs.get('return_bytearray' <false>)<block_start><return>bytearray().join(c)<block_end><else_stmt><block_start><return>bytes().join(c)<block_end><block_end><def_stmt>decompress x d_kwargs<block_start>d=[]<with_stmt>lz4.stream.LZ4StreamDecompressor(**d_kwargs)<as>proc<block_start>start=0<while_stmt>start<l>len(x)<block_start>block=proc.get_block(x[start:])<line_sep>chunk=proc.decompress(block)<line_sep>d.append(chunk)<line_sep>start<augadd>d_kwargs['store_comp_size']+len(block)<block_end><block_end><if_stmt>d_kwargs.get('return_bytearray' <false>)<block_start><return>bytearray().join(d)<block_end><else_stmt><block_start><return>bytes().join(d)<block_end><block_end>test_buffer_size=sorted([256 1<times>_1KB 64<times>_1KB 1<times>_1MB 1<times>_1GB lz4.stream.LZ4_MAX_INPUT_SIZE])<line_sep>@pytest.fixture(params=test_buffer_size ids=['buffer_size'+str(i)<for>i range(len(test_buffer_size))])<def_stmt>buffer_size request<block_start><return>request.param<block_end>test_data=[(b'a'<times>_1MB) ]<line_sep>@pytest.fixture(params=test_data ids=['data'+str(i)<for>i range(len(test_data))])<def_stmt>data request<block_start><return>request.param<block_end><def_stmt>test_block_decompress_mem_usage data buffer_size<block_start>kwargs={'strategy':"double_buffer" 'buffer_size':buffer_size 'store_comp_size':4 }<if_stmt>sys.maxsize<l>0xffffffff<block_start>pytest.skip('Py_ssize_t too small for this test')<block_end>tracemalloc=pytest.importorskip('tracemalloc')<line_sep># Trace memory usage on compression tracemalloc.start()<line_sep>prev_snapshot=<none><for_stmt>i range(1000)<block_start>compressed=compress(data kwargs)<if_stmt>i%100<eq>0<block_start>snapshot=tracemalloc.take_snapshot()<if_stmt>prev_snapshot# Filter on lz4.stream module'a allocations <block_start>stats=[x<for>x snapshot.compare_to(prev_snapshot 'lineno')<if>lz4.stream.__file__<in>x.traceback._frames[0][0]]<assert_stmt>sum(map(<lambda>x:x.size_diff stats))<l>(1024<times>4)<block_end>prev_snapshot=snapshot<block_end><block_end>tracemalloc.stop()<line_sep>tracemalloc.start()<line_sep>prev_snapshot=<none><for_stmt>i range(1000)<block_start>decompressed=decompress(compressed kwargs)# noqa: F841 <if_stmt>i%100<eq>0<block_start>snapshot=tracemalloc.take_snapshot()<if_stmt>prev_snapshot# Filter on lz4.stream module'a allocations <block_start>stats=[x<for>x snapshot.compare_to(prev_snapshot 'lineno')<if>lz4.stream.__file__<in>x.traceback._frames[0][0]]<assert_stmt>sum(map(<lambda>x:x.size_diff stats))<l>(1024<times>4)<block_end>prev_snapshot=snapshot<block_end><block_end>tracemalloc.stop()<block_end>
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. <import_stmt>math<import_stmt>numpy<as>np<import_stmt>os<import_stmt>xml.etree.ElementTree<as>ET<import_stmt>warp<as>wp<line_sep># SNU file format parser <class_stmt>MuscleUnit<block_start><def_stmt>__init__ self<block_start>self.name=""<line_sep>self.bones=[]<line_sep>self.points=[]<block_end><block_end><class_stmt>Skeleton<block_start><def_stmt>__init__ self root_xform skeleton_file muscle_file builder filter armature=0.0<block_start>self.parse_skeleton(skeleton_file builder filter root_xform armature)<line_sep>self.parse_muscles(muscle_file builder)<block_end><def_stmt>parse_skeleton self filename builder filter root_xform armature<block_start>file=ET.parse(filename)<line_sep>root=file.getroot()<line_sep>self.node_map={}# map node names to link indices self.xform_map={}# map node names to parent transforms self.mesh_map={}# map mesh names to link indices objects self.coord_start=builder.joint_coord_count<line_sep>self.dof_start=builder.joint_dof_count<line_sep>type_map={"Ball":wp.sim.JOINT_BALL "Revolute":wp.sim.JOINT_REVOLUTE "Prismatic":wp.sim.JOINT_PRISMATIC "Free":wp.sim.JOINT_FREE "Fixed":wp.sim.JOINT_FIXED}<line_sep>builder.add_articulation()<for_stmt>child root<block_start><if_stmt>(child.tag<eq>"Node")<block_start>body=child.find("Body")<line_sep>joint=child.find("Joint")<line_sep>name=child.attrib["name"]<line_sep>parent=child.attrib["parent"]<line_sep>parent_X_s=wp.transform_identity()<if_stmt>parent<in>self.node_map<block_start>parent_link=self.node_map[parent]<line_sep>parent_X_s=self.xform_map[parent]<block_end><else_stmt><block_start>parent_link=-1<block_end>body_xform=body.find("Transformation")<line_sep>joint_xform=joint.find("Transformation")<line_sep>body_mesh=body.attrib["obj"]<line_sep>body_size=np.fromstring(body.attrib["size"] sep=" ")<line_sep>body_type=body.attrib["type"]<line_sep>body_mass=body.attrib["mass"]<line_sep>body_R_s=np.fromstring(body_xform.attrib["linear"] sep=" ").reshape((3 3))<line_sep>body_t_s=np.fromstring(body_xform.attrib["translation"] sep=" ")<line_sep>joint_R_s=np.fromstring(joint_xform.attrib["linear"] sep=" ").reshape((3 3))<line_sep>joint_t_s=np.fromstring(joint_xform.attrib["translation"] sep=" ")<line_sep>joint_type=type_map[joint.attrib["type"]]<line_sep>joint_lower=np.array([-1.e+3])<line_sep>joint_upper=np.array([1.e+3])<try_stmt><block_start>joint_lower=np.fromstring(joint.attrib["lower"] sep=" ")<line_sep>joint_upper=np.fromstring(joint.attrib["upper"] sep=" ")<block_end><except_stmt><block_start><pass><block_end><if_stmt>("axis"<in>joint.attrib)<block_start>joint_axis=np.fromstring(joint.attrib["axis"] sep=" ")<block_end><else_stmt><block_start>joint_axis=np.array((0.0 0.0 0.0))<block_end>body_X_s=wp.transform(body_t_s wp.quat_from_matrix(body_R_s))<line_sep>joint_X_s=wp.transform(joint_t_s wp.quat_from_matrix(joint_R_s))<line_sep>mesh_base=os.path.splitext(body_mesh)[0]<line_sep>mesh_file=mesh_base+".usd"<line_sep>#----------------------------------- # one time conversion, put meshes into local body space (and meter units) # stage = Usd.Stage.Open("./assets/snu/OBJ/" + mesh_file) # geom = UsdGeom.Mesh.Get(stage, "/" 
+ mesh_base + "_obj/defaultobject/defaultobject") # body_X_bs = wp.transform_inverse(body_X_s) # joint_X_bs = wp.transform_inverse(joint_X_s) # points = geom.GetPointsAttr().Get() # for i in range(len(points)): # p = wp.transform_point(joint_X_bs, points[i]*0.01) # points[i] = Gf.Vec3f(p.tolist()) # cm -> meters # geom.GetPointsAttr().Set(points) # extent = UsdGeom.Boundable.ComputeExtentFromPlugins(geom, 0.0) # geom.GetExtentAttr().Set(extent) # stage.Save() #-------------------------------------- link=-1<if_stmt>len(filter)<eq>0<or>name<in>filter<block_start>joint_X_p=wp.transform_multiply(wp.transform_inverse(parent_X_s) joint_X_s)<line_sep>body_X_c=wp.transform_multiply(wp.transform_inverse(joint_X_s) body_X_s)<if_stmt>(parent_link<eq>-1)<block_start>joint_X_p=wp.transform_identity()<block_end># add link link=builder.add_body(parent=parent_link origin=wp.transform_multiply(root_xform joint_X_s) joint_xform=joint_X_p joint_axis=joint_axis joint_type=joint_type joint_target_ke=5.0 joint_target_kd=2.0 joint_limit_lower=joint_lower[0] joint_limit_upper=joint_upper[0] joint_limit_ke=1.e+3 joint_limit_kd=1.e+2 joint_armature=armature)<line_sep># add shape shape=builder.add_shape_box(body=link pos=body_X_c.p rot=body_X_c.q hx=body_size[0]<times>0.5 hy=body_size[1]<times>0.5 hz=body_size[2]<times>0.5 ke=1.e+3<times>5.0 kd=1.e+2<times>2.0 kf=1.e+3 mu=0.5)<block_end># add lookup in name->link map # save parent transform self.xform_map[name]=joint_X_s<line_sep>self.node_map[name]=link<line_sep>self.mesh_map[mesh_base]=link<block_end><block_end><block_end><def_stmt>parse_muscles self filename builder# list of MuscleUnits <block_start>muscles=[]<line_sep>file=ET.parse(filename)<line_sep>root=file.getroot()<line_sep>self.muscle_start=len(builder.muscle_activation)<for_stmt>child root<block_start><if_stmt>(child.tag<eq>"Unit")<block_start>unit_name=child.attrib["name"]<line_sep>unit_f0=float(child.attrib["f0"])<line_sep>unit_lm=float(child.attrib["lm"])<line_sep>unit_lt=float(child.attrib["lt"])<line_sep>unit_lmax=float(child.attrib["lmax"])<line_sep>unit_pen=float(child.attrib["pen_angle"])<line_sep>m=MuscleUnit()<line_sep>m.name=unit_name<line_sep>incomplete=<false><for_stmt>waypoint child.iter("Waypoint")<block_start>way_bone=waypoint.attrib["body"]<line_sep>way_link=self.node_map[way_bone]<line_sep>way_loc=np.fromstring(waypoint.attrib["p"] sep=" " dtype=np.float32)<if_stmt>(way_link<eq>-1)<block_start>incomplete=<true><line_sep><break><block_end># transform loc to joint local space joint_X_s=self.xform_map[way_bone]<line_sep>way_loc=wp.transform_point(wp.transform_inverse(joint_X_s) way_loc)<line_sep>m.bones.append(way_link)<line_sep>m.points.append(way_loc)<block_end><if_stmt><not>incomplete<block_start>muscles.append(m)<line_sep>builder.add_muscle(m.bones m.points f0=unit_f0 lm=unit_lm lt=unit_lt lmax=unit_lmax pen=unit_pen)<block_end><block_end><block_end>self.muscles=muscles<block_end><block_end><def_stmt>parse_snu root_xform skeleton_file muscle_file builder filter armature=0.0<block_start><return>Skeleton(root_xform skeleton_file muscle_file builder filter armature=0.0)<block_end>
# -*- coding:utf-8 -*- <import_stmt>datetime<import_from_stmt>atp.api.comm_log logger<import_from_stmt>atp.api.mysql_manager ApiTestReportManager<as>arm ApiTestcaseInfoManager<as>atim ApiTestcaseMainManager<as>atmm <import_from_stmt>atp.utils.tools get_current_time<def_stmt>perfect_summary summary test_meta_list# summary['stat']['successes'] = 996 <block_start>intf_id=test_meta_list.pop(0)['intf_id']<line_sep>step_list=[]<for_stmt>testcase test_meta_list<block_start>step_list.extend(testcase['step'])<block_end># print('step_list:{}'.format(step_list)) # assert len(step_list) == len(summary['details'][0]['records']) <assert_stmt>len(step_list)<eq>len(summary['details'])<line_sep># for step in summary['details'][0]['records']: # step_meta = step_list.pop(0) # step['testcase_name'] = step_meta['testcase_name'] # if 'error_detail' in step_meta: # pass <for_stmt>step summary['details']<block_start>step['intf_id']=intf_id<for_stmt>casename step_list<block_start>step["records"][0]['testcase_name']=casename['testcase_name']<block_end><block_end><block_end><def_stmt>save_report report_path runner_summary project_id report_id=<none> is_main=<false><block_start>"""保存测试报告"""<line_sep># 没有report_path,代表运行以非正常状态结束,未生成测试报告 <if_stmt><not>report_path<block_start>status='error'<if_stmt>report_id<block_start>arm.update_report(report_id status=status)<for_stmt>detail runner_summary['details']<block_start>is_success=0<if>detail['stat']['failures']<eq>0<else>1<if_stmt>is_main<block_start>atmm.update_testcase_main(detail['case_id'] last_run=is_success)<block_end><else_stmt><block_start>atim.update_testcase(detail['case_id'] last_run=is_success)<block_end><block_end><block_end><return><block_end># start_at = datetime.datetime.strftime(runner_summary['time']['start_at'], '%Y-%m-%d %H:%M:%S') start_at=(runner_summary['time']['start_at'])<line_sep>duration='{:.2f}'.format(runner_summary['time']['duration'])<line_sep>status='fail'<if>runner_summary['stat']['failures']<else>'success'<line_sep># report = str(runner_summary) report=''<if_stmt>report_id# 异步运行,已有测试报告id <block_start>arm.update_report(report_id start_at=start_at duration=duration status=status run_type='0' report=report url=report_path api_project_id=project_id)<block_end><else_stmt># 同步运行,无测试报告id <block_start>arm.insert_report(start_at=start_at duration=duration status=status run_type='0' report=report url=report_path api_project_id=project_id)<block_end><block_end><def_stmt>save_last_run summary is_main=<false><block_start><for_stmt>detail summary['details']<block_start>is_success=0<if>detail['stat']['failures']<eq>0<else>1<if_stmt>is_main<block_start>atmm.update_testcase_main(detail['case_id'] last_run=is_success last_run_time=get_current_time())<block_end><else_stmt><block_start>atim.update_testcase(detail['case_id'] last_run=is_success last_run_time=get_current_time())<block_end><block_end><block_end>
""" The lidar system, data (2 of 2 datasets) ======================================== Generate a chart of more complex data recorded by the lidar system """<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<line_sep>waveform_2=np.load('waveform_2.npy')<line_sep>t=np.arange(len(waveform_2))<line_sep>fig,ax=plt.subplots(figsize=(8 6))<line_sep>plt.plot(t waveform_2)<line_sep>plt.xlabel('Time [ns]')<line_sep>plt.ylabel('Amplitude [bins]')<line_sep>plt.show()<line_sep>
"""Create the images for the FOOOF documentation."""<import_stmt>shutil<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>fooof FOOOF FOOOFGroup<import_from_stmt>fooof.sim.gen gen_power_spectrum<import_from_stmt>fooof.plts.utils check_ax<import_from_stmt>fooof.plts.spectra plot_spectrum<import_from_stmt>fooof.utils.download load_fooof_data<line_sep>################################################################################################### ################################################################################################### <def_stmt>main ## Individual Model Plot # Download examples data files needed for this example <block_start>freqs=load_fooof_data('freqs.npy' folder='data')<line_sep>spectrum=load_fooof_data('spectrum.npy' folder='data')<line_sep># Initialize and fit an example power spectrum model fm=FOOOF(peak_width_limits=[1 6] max_n_peaks=6 min_peak_height=0.2 verbose=<false>)<line_sep>fm.fit(freqs spectrum [3 40])<line_sep># Save out the report fm.save_report('FOOOF_report.png' 'img')<line_sep>## Group Plot # Download examples data files needed for this example freqs=load_fooof_data('group_freqs.npy' folder='data')<line_sep>spectra=load_fooof_data('group_powers.npy' folder='data')<line_sep># Initialize and fit a group of example power spectrum models fg=FOOOFGroup(peak_width_limits=[1 6] max_n_peaks=6 min_peak_height=0.2 verbose=<false>)<line_sep>fg.fit(freqs spectra [3 30])<line_sep># Save out the report fg.save_report('FOOOFGroup_report.png' 'img')<line_sep>## Make the icon plot # Simulate an example power spectrum fs,ps=gen_power_spectrum([4 35] [0 1] [[10 0.3 1] [22 0.15 1.25]] nlv=0.01)<def_stmt>custom_style ax log_freqs log_powers<block_start>"""Custom styler-function for the icon plot."""<line_sep># Set the top and right side frame & ticks off ax.spines['right'].set_visible(<false>)<line_sep>ax.spines['top'].set_visible(<false>)<line_sep># Set linewidth of remaining spines ax.spines['left'].set_linewidth(10)<line_sep>ax.spines['bottom'].set_linewidth(10)<line_sep>ax.set_xticks([] [])<line_sep>ax.set_yticks([] [])<block_end># Create and save out the plot plot_spectrum(fs ps log_freqs=<false> log_powers=<true> lw=12 alpha=0.8 color='grey' plot_style=custom_style ax=check_ax(<none> [6 6]))<line_sep>plt.tight_layout()<line_sep>plt.savefig('img/spectrum.png')<line_sep>## Clean Up # Remove the data folder shutil.rmtree('data')<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for users.py."""<import_stmt>httplib<import_stmt>webapp2<import_from_stmt>upvote.gae.datastore test_utils<import_from_stmt>upvote.gae.lib.testing basetest<import_from_stmt>upvote.gae.modules.upvote_app.api.web users<import_from_stmt>upvote.gae.utils user_utils<class_stmt>UsersTest(basetest.UpvoteTestCase)<block_start>"""Base class for User handler tests."""<def_stmt>setUp self<block_start>app=webapp2.WSGIApplication(routes=[users.ROUTES])<line_sep>super(UsersTest self).setUp(wsgi_app=app)<line_sep>self.PatchValidateXSRFToken()<block_end><block_end><class_stmt>UserQueryHandlerTest(UsersTest)<block_start>ROUTE='/users/query'<def_stmt>testAdminGetList self<block_start>"""Admin retrieves list of all users."""<line_sep>user_count=10<line_sep>test_utils.CreateUsers(user_count)<with_stmt>self.LoggedInUser(admin=<true>)<block_start>response=self.testapp.get(self.ROUTE)<block_end>output=response.json<line_sep>self.assertIn('application/json' response.headers['Content-type'])<line_sep>self.assertIsInstance(output dict)<line_sep>self.assertLen(output['content'] user_count)<block_end><def_stmt>testAdminGetListPlatformNoEffect self<block_start>"""Admin specifies a platform which has no effect on the results."""<line_sep>params={'platform':'santa'}<line_sep>user_count=10<line_sep>test_utils.CreateUsers(user_count)<with_stmt>self.LoggedInUser(admin=<true>)<block_start>response=self.testapp.get(self.ROUTE params)<block_end>output=response.json<line_sep>self.assertIn('application/json' response.headers['Content-type'])<line_sep>self.assertIsInstance(output dict)<line_sep>self.assertLen(output['content'] user_count)<block_end><def_stmt>testUserGetListNoPermissions self<block_start>"""Normal user attempts to retrieve all users."""<with_stmt>self.LoggedInUser()<block_start>self.testapp.get(self.ROUTE status=httplib.FORBIDDEN)<block_end><block_end><def_stmt>testAdminGetQuery self<block_start>"""Admin queries a user."""<line_sep>params={'search':1 'searchBase':'voteWeight'}<line_sep>user_count=10<line_sep>test_utils.CreateUsers(user_count)<with_stmt>self.LoggedInUser(admin=<true>)<block_start>response=self.testapp.get(self.ROUTE params)<block_end>output=response.json<line_sep>self.assertIn('application/json' response.headers['Content-type'])<line_sep>self.assertIsInstance(output dict)<line_sep>self.assertLen(output['content'] user_count)<block_end><def_stmt>testUserGetQueryNoPermissions self<block_start>"""Normal user queries a rule."""<line_sep>params={'search':1 'searchBase':'voteWeight'}<with_stmt>self.LoggedInUser()<block_start>self.testapp.get(self.ROUTE params status=httplib.FORBIDDEN)<block_end><block_end><block_end><class_stmt>UserHandlerTest(UsersTest)<block_start>ROUTE='/users/%s'<def_stmt>testAdminGetSelf self<block_start>"""Admin getting own 
information."""<with_stmt>self.LoggedInUser(admin=<true>)<as>admin<block_start>response=self.testapp.get(self.ROUTE%admin.email)<line_sep>output=response.json<line_sep>self.assertIn('application/json' response.headers['Content-type'])<line_sep>self.assertIsInstance(output dict)<line_sep>self.assertTrue(output['isAdmin'])<line_sep>self.assertEqual(output['name'] admin.nickname)<block_end><block_end><def_stmt>testAdminGetOtherUser self<block_start>"""Admin getting information on another user."""<line_sep>user=test_utils.CreateUser()<with_stmt>self.LoggedInUser(admin=<true>)<block_start>response=self.testapp.get(self.ROUTE%user.email)<block_end>output=response.json<line_sep>self.assertIn('application/json' response.headers['Content-type'])<line_sep>self.assertIsInstance(output dict)<line_sep>self.assertFalse(output['isAdmin'])<line_sep>self.assertEqual(output['name'] user.nickname)<block_end><def_stmt>testAdminGetUnknownUser self<block_start>"""Admin attempting to get information on an unknown user."""<with_stmt>self.LoggedInUser(admin=<true>)<block_start>unknown_user=user_utils.UsernameToEmail('blahblahblah')<line_sep>self.testapp.get(self.ROUTE%unknown_user status=httplib.NOT_FOUND)<block_end><block_end><def_stmt>testUserGetOtherUser self<block_start>"""Normal user trying to get information on another user."""<line_sep>user=test_utils.CreateUser()<with_stmt>self.LoggedInUser()<block_start>self.testapp.get(self.ROUTE%user.email status=httplib.FORBIDDEN)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>basetest.main()<block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process('TEST')<line_sep>process.options=cms.untracked.PSet(wantSummary=cms.untracked.bool(<true>))<line_sep>process.load('FWCore.MessageService.MessageLogger_cfi')<line_sep>process.MessageLogger.cerr.INFO=cms.untracked.PSet(reportEvery=cms.untracked.int32(1) # every! limit=cms.untracked.int32(-1)# no limit! )<line_sep>process.MessageLogger.cerr.FwkReport.reportEvery=10# only report every 10th event start process.MessageLogger.cerr_stats.threshold='INFO'# also info in statistics # read back the trigger decisions process.source=cms.Source('PoolSource' fileNames=cms.untracked.vstring('file:trigger.root'))<import_stmt>HLTrigger.HLTfilters.hltHighLevel_cfi<as>hlt<line_sep># accept if 'path_1' succeeds process.filter_1=hlt.hltHighLevel.clone(HLTPaths=['path_1'] throw=<false>)<line_sep># accept if 'path_2' succeeds process.filter_2=hlt.hltHighLevel.clone(HLTPaths=['path_2'] throw=<false>)<line_sep># accept if 'path_3' succeeds process.filter_3=hlt.hltHighLevel.clone(HLTPaths=['path_3'] throw=<false>)<line_sep># accept if any path succeeds (implicit) process.filter_any_implicit=hlt.hltHighLevel.clone(# HLTPaths = [], # empty is default throw=<false>)<line_sep># accept if any path succeeds (explicit) process.filter_any_explicit=hlt.hltHighLevel.clone(HLTPaths=['path_1' 'path_2' 'path_3'] throw=<false>)<line_sep># accept if any path succeeds (wildcard, '*') process.filter_any_star=hlt.hltHighLevel.clone(HLTPaths=['p*'] throw=<false>)<line_sep># accept if any path succeeds (wildcard, twice '*') process.filter_any_doublestar=hlt.hltHighLevel.clone(HLTPaths=['p*t*'] throw=<false>)<line_sep># accept if any path succeeds (wildcard, '?') process.filter_any_question=hlt.hltHighLevel.clone(HLTPaths=['path_?'] throw=<false>)<line_sep># accept if all path succeed (implicit) process.filter_all_implicit=hlt.hltHighLevel.clone(#HLTPaths = [], # empty is default andOr=<false> throw=<false>)<line_sep># accept if all path succeed (explicit) process.filter_all_explicit=hlt.hltHighLevel.clone(HLTPaths=['path_1' 'path_2' 'path_3'] andOr=<false> throw=<false>)<line_sep># accept if all path succeed (wildcard, '*') process.filter_all_star=hlt.hltHighLevel.clone(HLTPaths=['p*'] andOr=<false> throw=<false>)<line_sep># accept if all path succeed (wildcard, '*') process.filter_all_doublestar=hlt.hltHighLevel.clone(HLTPaths=['p*t*'] andOr=<false> throw=<false>)<line_sep># accept if all path succeed (wildcard, '?') process.filter_all_question=hlt.hltHighLevel.clone(HLTPaths=['path_?'] andOr=<false> throw=<false>)<line_sep># wrong L1 name (explicit) process.filter_wrong_name=hlt.hltHighLevel.clone(HLTPaths=['path_wrong'] throw=<false>)<line_sep># wrong L1 name (wildcard) process.filter_wrong_pattern=hlt.hltHighLevel.clone(HLTPaths=['*_wrong'] throw=<false>)<line_sep>## start testing AlCaRecoTriggerBits ############################## ## ## This works after having run a modified version of ## cmsRun src/CondTools/HLT/test/AlCaRecoTriggerBitsRcdWrite_cfg.py ## Simply remove overwriting of ## process.AlCaRecoTriggerBitsRcdWrite.triggerLists ... 
## ## AlCaRecoTriggerBits #process.filter_AlCaRecoTriggerBits = hlt.hltHighLevel.clone( # eventSetupPathsKey = 'test13', #'TkAlMinBias', # throw = False # True #) # ## DB input #import CondCore.DBCommon.CondDBSetup_cfi #process.dbInput = cms.ESSource( # "PoolDBESSource", # CondCore.DBCommon.CondDBSetup_cfi.CondDBSetup, # connect = cms.string('sqlite_file:AlCaRecoTriggerBits.db'), # toGet = cms.VPSet(cms.PSet( # record = cms.string('AlCaRecoTriggerBitsRcd'), # tag = cms.string('TestTag') # choose tag you want # ) # ) # ) #process.end_AlCaRecoTriggerBits = cms.Path( process.filter_AlCaRecoTriggerBits ) ## ## end testing AlCaRecoTriggerBits ################################ process.end_1=cms.Path(process.filter_1)<line_sep>process.end_2=cms.Path(process.filter_2)<line_sep>process.end_3=cms.Path(process.filter_3)<line_sep>process.end_any_implicit=cms.Path(process.filter_any_implicit)<line_sep>process.end_any_explicit=cms.Path(process.filter_any_explicit)<line_sep>process.end_any_star=cms.Path(process.filter_any_star)<line_sep>process.end_any_doublestar=cms.Path(process.filter_any_doublestar)<line_sep>process.end_any_question=cms.Path(process.filter_any_question)<line_sep>#process.end_any_filter = cms.Path( ~ ( ~ process.filter_1 + ~ process.filter_2 + ~ process.filter_3) ) process.end_all_implicit=cms.Path(process.filter_all_implicit)<line_sep>process.end_all_explicit=cms.Path(process.filter_all_explicit)<line_sep>process.end_all_star=cms.Path(process.filter_all_star)<line_sep>process.end_all_doublestar=cms.Path(process.filter_all_doublestar)<line_sep>process.end_all_question=cms.Path(process.filter_all_question)<line_sep>process.end_all_filter=cms.Path(process.filter_1+process.filter_2+process.filter_3)<line_sep>process.end_wrong_name=cms.Path(process.filter_wrong_name)<line_sep>process.end_wrong_pattern=cms.Path(process.filter_wrong_pattern)<line_sep>process.end_not_wrong_pattern=cms.Path(~process.filter_wrong_pattern)<line_sep># define an EndPath to analyze all other path results process.hltTrigReport=cms.EDAnalyzer('HLTrigReport' HLTriggerResults=cms.InputTag('TriggerResults' '' 'TEST'))<line_sep>process.HLTAnalyzerEndpath=cms.EndPath(process.hltTrigReport)<line_sep>
<import_from_future_stmt> print_function<import_from_future_stmt> division<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.optim<as>optim<import_stmt>numpy<as>np<line_sep>print("PyTorch Version: " torch.__version__)<import_stmt>pickle<import_stmt>os<import_stmt>scipy.io<as>sio<import_stmt>cv2<import_from_stmt>model *<import_from_stmt>pano get_ini_cor<import_from_stmt>pano_opt_gen optimize_cor_id<import_stmt>post_proc2<as>post_proc<import_from_stmt>shapely.geometry Polygon<import_from_stmt>scipy.ndimage.filters maximum_filter<import_from_stmt>scipy.ndimage convolve<import_stmt>scipy.signal<import_stmt>sys<import_from_stmt>sklearn.metrics classification_report<line_sep># general case # Top level data directory. Here we assume the format of the directory conforms # to the ImageFolder structure test_path='./data/matterport/mp3d_align/'<line_sep>weight_path='./model/resnet34_matterport.pth'<line_sep>save_path='./result_gen/'<line_sep>depth_path='./result_gen_depth/'<line_sep>depth_path_gt='./data/matterport/share_depth/'<line_sep># Pre-trained models to choose from [resnet18, resnet34, resnet50] #model_name = "resnet18" model_name="resnet34"<line_sep>#model_name = "resnet50" num_classes=1024<line_sep>print("Load Models...")<line_sep># Define the encoder encoder=initialize_encoder(model_name num_classes use_pretrained=<true>)<line_sep># Full model model_ft=SegNet(encoder num_classes)<line_sep>model_ft.load_state_dict(torch.load(weight_path))<line_sep># Detect if we have a GPU available device=torch.device("cuda:0"<if>torch.cuda.is_available()<else>"cpu")<line_sep># Send the model to GPU model_ft=model_ft.to(device)<line_sep># evaluation mode model_ft.eval()<def_stmt>find_N_peaks signal r=29 min_v=0.05 N=<none><block_start>max_v=maximum_filter(signal size=r mode='wrap')<line_sep>pk_loc=np.where(max_v<eq>signal)[0]<line_sep>pk_loc=pk_loc[signal[pk_loc]<g>min_v]<line_sep># check for odd case, remove one <if_stmt>(pk_loc.shape[0]%2)<ne>0<block_start>pk_id=np.argsort(-signal[pk_loc])<line_sep>pk_loc=pk_loc[pk_id[:-1]]<line_sep>pk_loc=np.sort(pk_loc)<block_end><if_stmt>N<is><not><none><block_start>order=np.argsort(-signal[pk_loc])<line_sep>pk_loc=pk_loc[order[:N]]<line_sep>pk_loc=pk_loc[np.argsort(pk_loc)]<block_end><return>pk_loc signal[pk_loc]<block_end><def_stmt>find_N_peaks_conv signal prominence distance N=4<block_start>locs,_=scipy.signal.find_peaks(signal prominence=prominence distance=distance)<line_sep>pks=signal[locs]<line_sep>pk_id=np.argsort(-pks)<line_sep>pk_loc=locs[pk_id[:min(N len(pks))]]<line_sep>pk_loc=np.sort(pk_loc)<line_sep><return>pk_loc signal[pk_loc]<block_end><def_stmt>get_ini_cor cor_img d1=21 d2=3<block_start>cor=convolve(cor_img np.ones((d1 d1)) mode='constant' cval=0.0)<line_sep>cor_id=[]<line_sep>cor_=cor_img.sum(0)<line_sep>cor_=(cor_-np.amin(cor_))/np.ptp(cor_)<line_sep>min_v=0.25#0.05 xs_=find_N_peaks(cor_ r=26 min_v=min_v N=<none>)[0]<line_sep># spetial case for too less corner <if_stmt>xs_.shape[0]<l>4<block_start>xs_=find_N_peaks(cor_ r=26 min_v=0.05 N=<none>)[0]<if_stmt>xs_.shape[0]<l>4<block_start>xs_=find_N_peaks(cor_ r=26 min_v=0 N=<none>)[0]<block_end><block_end>X_loc=xs_<for_stmt>x X_loc<block_start>x_=int(np.round(x))<line_sep>V_signal=cor[: max(0 x_-d2):x_+d2+1].sum(1)<line_sep>y1,y2=find_N_peaks_conv(V_signal prominence=<none> distance=20 N=2)[0]<line_sep>cor_id.append((x y1))<line_sep>cor_id.append((x y2))<block_end>cor_id=np.array(cor_id np.float64)<line_sep><return>cor_id<block_end><def_stmt>test_general dt_cor_id gt_cor_id w h 
losses<block_start>dt_floor_coor=dt_cor_id[1::2]<line_sep>dt_ceil_coor=dt_cor_id[0::2]<line_sep>gt_floor_coor=gt_cor_id[1::2]<line_sep>gt_ceil_coor=gt_cor_id[0::2]<assert_stmt>(dt_floor_coor[: 0]<ne>dt_ceil_coor[: 0]).sum()<eq>0<assert_stmt>(gt_floor_coor[: 0]<ne>gt_ceil_coor[: 0]).sum()<eq>0<line_sep># Eval 3d IoU and height error(in meter) N=len(dt_floor_coor)<line_sep>ch=-1.6<line_sep>dt_floor_xy=post_proc.np_coor2xy(dt_floor_coor ch 1024 512 floorW=1 floorH=1)<line_sep>gt_floor_xy=post_proc.np_coor2xy(gt_floor_coor ch 1024 512 floorW=1 floorH=1)<line_sep>dt_poly=Polygon(dt_floor_xy)<line_sep>gt_poly=Polygon(gt_floor_xy)<line_sep>area_dt=dt_poly.area<line_sep>area_gt=gt_poly.area<if_stmt>area_dt<l>1e-05<block_start>print('too small room')<line_sep># Add a result n_corners=len(gt_floor_coor)<line_sep>n_corners=str(n_corners)<if>n_corners<l>14<else>'14+'<line_sep>losses[n_corners]['2DIoU'].append(0)<line_sep>losses[n_corners]['3DIoU'].append(0)<line_sep>losses['overall']['2DIoU'].append(0)<line_sep>losses['overall']['3DIoU'].append(0)<line_sep><return><block_end>area_inter=dt_poly.intersection(gt_poly).area<line_sep>area_union=dt_poly.union(gt_poly).area<line_sep>area_pred_wo_gt=dt_poly.difference(gt_poly).area<line_sep>area_gt_wo_pred=gt_poly.difference(dt_poly).area<line_sep>iou2d=area_inter/(area_gt+area_dt-area_inter)<line_sep>cch_dt=post_proc.get_z1(dt_floor_coor[: 1] dt_ceil_coor[: 1] ch 512)<line_sep>cch_gt=post_proc.get_z1(gt_floor_coor[: 1] gt_ceil_coor[: 1] ch 512)<line_sep>h_dt=abs(cch_dt.mean()-ch)<line_sep>h_gt=abs(cch_gt.mean()-ch)<line_sep>#iouH = min(h_dt, h_gt) / max(h_dt, h_gt) #iou3d = iou2d * iouH iou3d=(area_inter<times>min(h_dt h_gt))/(area_pred_wo_gt<times>h_dt+area_gt_wo_pred<times>h_gt+area_inter<times>max(h_dt h_gt))<line_sep># Add a result n_corners=len(gt_floor_coor)<line_sep>n_corners=str(n_corners)<if>n_corners<l>14<else>'14+'<line_sep>losses[n_corners]['2DIoU'].append(iou2d)<line_sep>losses[n_corners]['3DIoU'].append(iou3d)<line_sep>losses['overall']['2DIoU'].append(iou2d)<line_sep>losses['overall']['3DIoU'].append(iou3d)<block_end># Load data gt_txt_path='/data/czou4/Layout/_final_label_v2/test.txt'<line_sep>namelist=[]<with_stmt>open(gt_txt_path 'r')<as>f<block_start><while_stmt>(<true>)<block_start>line=f.readline().strip()<if_stmt><not>line<block_start><break><block_end>namelist.append(line)<block_end><block_end>criterion=nn.BCELoss()<line_sep>criterion2=nn.BCELoss()<line_sep>cnt=0<line_sep>num=0<line_sep>loss_cor=0.0<line_sep>loss_pe=0.0<line_sep>loss_3d=0.0<line_sep>loss_sum=0.0<line_sep>losses=dict([(n_corner {'2DIoU':[] '3DIoU':[] 'rmse':[] 'delta_1':[]})<for>n_corner ['4' '6' '8' '10' '12' '14+' 'overall']])<line_sep># for precision recall target_names=['4 corners' '6 corners' '8 corners' '10 corners' '12 corners' '14 corners' '16 corners' '18 corners']<line_sep>y_true=np.zeros(len(namelist))<line_sep>y_pred=np.zeros(len(namelist))<for_stmt>file_list namelist#file_list = np.random.choice(namelist, 1) #file_list = file_list[0] <block_start>print(file_list)<line_sep>file_list_sub=file_list.split(" ")<line_sep>pkl_path=os.path.join(test_path file_list_sub[0] file_list_sub[1])<line_sep>img=cv2.imread(os.path.join(pkl_path 'aligned_rgb.png'))<line_sep>img=img.astype('float32')/255.0<line_sep>mask=cv2.imread(os.path.join(pkl_path 'aligned_line.png'))<line_sep>mask=mask.astype('float32')/255.0<line_sep>gt=np.loadtxt(os.path.join(pkl_path 'cor.txt'))<line_sep># lr flip 
img2=np.fliplr(img).copy()<line_sep>mask2=np.fliplr(mask).copy()<line_sep>image=torch.tensor(img).to(device).float()<line_sep>masks=torch.tensor(mask).to(device).float()<line_sep>inputs=image.permute(2 0 1)<line_sep>inputs=inputs.unsqueeze(0)<line_sep>masks=masks.permute(2 0 1)<line_sep>masks=masks.unsqueeze(0)<line_sep>inputs=torch.cat((inputs masks) 1)<line_sep>image2=torch.tensor(img2).to(device).float()<line_sep>masks2=torch.tensor(mask2).to(device).float()<line_sep>inputs2=image2.permute(2 0 1)<line_sep>inputs2=inputs2.unsqueeze(0)<line_sep>masks2=masks2.permute(2 0 1)<line_sep>masks2=masks2.unsqueeze(0)<line_sep>inputs2=torch.cat((inputs2 masks2) 1)<line_sep>inputs=torch.cat((inputs inputs2) 0)<line_sep># forward outputs,outputs2=model_ft(inputs)<line_sep># lr flip and take mean outputs1=outputs[1]<line_sep>outputs22=outputs2[1]<line_sep>inv_idx=torch.arange(outputs1.size(2)-1 -1 -1).to(device).long()<line_sep>outputs1=outputs1.index_select(2 inv_idx)<line_sep>outputs=torch.mean(torch.cat((outputs[0].unsqueeze(0) outputs1.unsqueeze(0)) 0) 0 <true>)<line_sep>outputs22=outputs22.index_select(2 inv_idx)<line_sep>outputs2=torch.mean(torch.cat((outputs2[0].unsqueeze(0) outputs22.unsqueeze(0)) 0) 0 <true>)<line_sep>outputs=outputs.squeeze(0).permute(1 2 0)<line_sep>outputs2=outputs2.squeeze(0).squeeze(0)<line_sep>inputs=inputs[0].permute(1 2 0)<line_sep>#gradient ascent refinement cor_img=outputs2.data.cpu().numpy()<line_sep>edg_img=outputs.data.cpu().numpy()<line_sep>#general layout, tp view cor_=cor_img.sum(0)<line_sep>cor_=(cor_-np.amin(cor_))/np.ptp(cor_)<line_sep>min_v=0.25#0.05 xs_=find_N_peaks(cor_ r=26 min_v=min_v N=<none>)[0]<line_sep># spetial case for too less corner <if_stmt>xs_.shape[0]<l>4<block_start>xs_=find_N_peaks(cor_ r=26 min_v=0.05 N=<none>)[0]<if_stmt>xs_.shape[0]<l>4<block_start>xs_=find_N_peaks(cor_ r=26 min_v=0 N=<none>)[0]<block_end><block_end># get ceil and floor line ceil_img=edg_img[: : 1]<line_sep>floor_img=edg_img[: : 2]<line_sep>ceil_idx=np.argmax(ceil_img axis=0)<line_sep>floor_idx=np.argmax(floor_img axis=0)<line_sep># Init floor/ceil plane z0=50<line_sep>force_cuboid=<false><line_sep>_,z1=post_proc.np_refine_by_fix_z(ceil_idx floor_idx z0)<line_sep># Generate general wall-wall cor,xy_cor=post_proc.gen_ww(xs_ ceil_idx z0 tol=abs(0.16<times>z1/1.6) force_cuboid=force_cuboid)<if_stmt><not>force_cuboid# Check valid (for fear self-intersection) <block_start>xy2d=np.zeros((len(xy_cor) 2) np.float32)<for_stmt>i range(len(xy_cor))<block_start>xy2d[i xy_cor[i]['type']]=xy_cor[i]['val']<line_sep>xy2d[i xy_cor[i-1]['type']]=xy_cor[i-1]['val']<block_end><if_stmt><not>Polygon(xy2d).is_valid# actually it's not force cuboid, just assume all corners are visible, go back to original LayoutNet initialization #print( # 'Fail to generate valid general layout!! 
' # 'Generate cuboid as fallback.', # file=sys.stderr) <block_start>cor_id=get_ini_cor(cor_img 21 3)<line_sep>force_cuboid=<true><block_end><block_end><if_stmt><not>force_cuboid# Expand with btn coory <block_start>cor=np.hstack([cor post_proc.infer_coory(cor[: 1] z1-z0 z0)[: <none>]])<line_sep># Collect corner position in equirectangular cor_id=np.zeros((len(cor)<times>2 2) np.float32)<for_stmt>j range(len(cor))<block_start>cor_id[j<times>2]=cor[j 0] cor[j 1]<line_sep>cor_id[j<times>2+1]=cor[j 0] cor[j 2]<block_end><block_end># refinement cor_id=optimize_cor_id(cor_id edg_img cor_img num_iters=100 verbose=<false>)<line_sep>test_general(cor_id gt 1024 512 losses)<line_sep># save, uncomment to generate depth map #print(save_path+file_list_sub[0]+'_'+file_list_sub[1]+'.mat') #sio.savemat(save_path+file_list_sub[0]+'_'+file_list_sub[1]+'.mat',{'cor_id':cor_id}) #load pred_depth=depth_path+file_list_sub[0]+'_'+file_list_sub[1]+'.mat'<if_stmt>os.path.exists(pred_depth)<block_start>pred_depth=sio.loadmat(pred_depth)<line_sep>pred_depth=pred_depth['im_depth']<line_sep>#gt gt_depth=np.load(os.path.join(depth_path_gt file_list_sub[0] file_list_sub[1] 'new_depth.npy'))<line_sep>pred_depth=cv2.resize(pred_depth (gt_depth.shape[1] gt_depth.shape[0]))<line_sep># rmse pred_depth=pred_depth[np.nonzero(gt_depth)]<line_sep>gt_depth=gt_depth[np.nonzero(gt_depth)]<line_sep>rmse=np.average((gt_depth-pred_depth)<power>2)<power>0.5<line_sep># delta_1 max_map=np.where(gt_depth/pred_depth<g>pred_depth/gt_depth gt_depth/pred_depth pred_depth/gt_depth)<line_sep>delta_1=np.average(np.where(max_map<l>1.25 1 0))<line_sep># Add a result n_corners=len(gt[1::2])<line_sep>n_corners=str(n_corners)<if>n_corners<l>14<else>'14+'<line_sep>losses[n_corners]['rmse'].append(rmse)<line_sep>losses[n_corners]['delta_1'].append(delta_1)<line_sep>losses['overall']['rmse'].append(rmse)<line_sep>losses['overall']['delta_1'].append(delta_1)<block_end>torch.cuda.empty_cache()<line_sep>#del outputs1, outputs, outputs2, outputs22, labels, labels2, inputs, inputs2, loss <del_stmt>outputs1 outputs outputs2 outputs22 inputs inputs2<line_sep>y_true[cnt]=int(gt.shape[0]<floordiv>2<floordiv>2-2)<line_sep>y_pred[cnt]=int(cor_id.shape[0]<floordiv>2<floordiv>2-2)<line_sep>cnt<augadd>1<line_sep>num<augadd>1<line_sep>iou2d=np.array(losses['overall']['2DIoU'])<line_sep>iou3d=np.array(losses['overall']['3DIoU'])<line_sep>rmse=np.array(losses['overall']['rmse'])<line_sep>delta_1=np.array(losses['overall']['delta_1'])<line_sep>print('No. {}, 2d Loss: {:.6f}, 3d Loss: {:.6f}, rmse: {:.6f}, delta_1: {:.6f}'.format(cnt iou2d.mean()<times>100 iou3d.mean()<times>100 rmse.mean()<times>100 delta_1.mean()<times>100))<block_end><for_stmt>k,result losses.items()<block_start>iou2d=np.array(result['2DIoU'])<line_sep>iou3d=np.array(result['3DIoU'])<line_sep>rmse=np.array(result['rmse'])<line_sep>delta_1=np.array(result['delta_1'])<if_stmt>len(iou2d)<eq>0<block_start><continue><block_end>print('GT #Corners: %s (%d instances)'%(k len(iou2d)))<line_sep>print(' 2DIoU: %.2f'%(iou2d.mean()<times>100))<line_sep>print(' 3DIoU: %.2f'%(iou3d.mean()<times>100))<line_sep>print(' RMSE: %.2f'%(rmse.mean()<times>100))<line_sep>print(' Delta_1: %.2f'%(delta_1.mean()<times>100))<block_end>print(classification_report(y_true y_pred target_names=target_names))<line_sep>
test.ip.c_pointer_types.cpp[0][0][0]=["Cat" "Dog"]<line_sep>test.ip.c_pointer_types.cpp[0][0][1]=["Horse"]<line_sep>
# Copyright 2000-2002 by <NAME>. # Revisions copyright 2007-2010 by <NAME>. # All rights reserved. # # This file is part of the Biopython distribution and governed by your # choice of the "Biopython License Agreement" or the "BSD 3-Clause License". # Please see the LICENSE file that should have been included as part of this # package. """Alphabets were previously used to declare sequence type and letters (OBSOLETE). The design of Bio.Aphabet included a number of historic design choices which, with the benefit of hindsight, were regretable. Bio.Alphabet was therefore removed from Biopython in release 1.78. Instead, the molecule type is included as an annotation on SeqRecords where appropriate. Please see https://biopython.org/wiki/Alphabet for examples showing how to transition from Bio.Alphabet to molecule type annotations. """<line_sep><raise>ImportError("Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://biopython.org/wiki/Alphabet for more information.")<line_sep>
<import_from_stmt>decimal Decimal<import_from_stmt>...webhooks get_or_create_adyen_partial_payments<def_stmt>test_get_or_create_adyen_partial_payments_with_additional_actions_response payment_adyen_for_checkout # given <block_start>notification_data={"additionalData":{"order-2-paymentMethod":"visa" "threeds2.cardEnrolled":"false" "order-2-pspReference":"861643021198177D" "order-2-paymentAmount":"GBP 16.29" "recurringProcessingModel":"Subscription" "paymentMethod":"visa" "order-1-pspReference":"861643021155073F" "order-1-paymentAmount":"GBP 14.71" "order-1-paymentMethod":"givex" } "pspReference":"861643021198177D" "resultCode":"Authorised" "merchantReference":"UGF5bWVudDoyNw==" "paymentMethod":"visa" "shopperLocale":"en_GB" }<line_sep>checkout=payment_adyen_for_checkout.checkout<line_sep># when get_or_create_adyen_partial_payments(notification_data payment_adyen_for_checkout)<line_sep># then partial_payments=list(checkout.payments.exclude(id=payment_adyen_for_checkout.id))<assert_stmt>len(partial_payments)<eq>2<assert_stmt>all([payment.is_active<is><false><for>payment partial_payments])<assert_stmt>all([payment.partial<is><true><for>payment partial_payments])<assert_stmt>all([payment.is_active<is><false><for>payment partial_payments])<assert_stmt>any(payment.total<eq>Decimal("14.71")<for>payment partial_payments)<assert_stmt>any(payment.total<eq>Decimal("16.29")<for>payment partial_payments)<assert_stmt>any(payment.psp_reference<eq>"861643021155073F"<for>payment partial_payments)<assert_stmt>any(payment.psp_reference<eq>"861643021198177D"<for>payment partial_payments)<block_end><def_stmt>test_get_or_create_adyen_partial_payments_with_notification_payload notification payment_adyen_for_checkout# given <block_start>notification_data=notification()<line_sep>notification_data["additionalData"]={"order-2-paymentMethod":"visa" "order-2-pspReference":"881643125782168B" "order-2-paymentAmount":"GBP 29.10" "order-1-pspReference":"861643125754056E" "order-1-paymentAmount":"GBP 41.90" "order-1-paymentMethod":"givex" }<line_sep>checkout=payment_adyen_for_checkout.checkout<line_sep># when get_or_create_adyen_partial_payments(notification_data payment_adyen_for_checkout)<line_sep># then partial_payments=list(checkout.payments.exclude(id=payment_adyen_for_checkout.id))<assert_stmt>len(partial_payments)<eq>2<assert_stmt>all([payment.is_active<is><false><for>payment partial_payments])<assert_stmt>all([payment.partial<is><true><for>payment partial_payments])<assert_stmt>all([payment.is_active<is><false><for>payment partial_payments])<assert_stmt>any(payment.total<eq>Decimal("29.10")<for>payment partial_payments)<assert_stmt>any(payment.total<eq>Decimal("41.90")<for>payment partial_payments)<assert_stmt>any(payment.psp_reference<eq>"881643125782168B"<for>payment partial_payments)<assert_stmt>any(payment.psp_reference<eq>"861643125754056E"<for>payment partial_payments)<block_end>
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. IGNORED_FILE_PREFIXES=["."]<line_sep>IGNORED_FILE_SUFFIXES=["~" ".swp"]<line_sep>IGNORED_DIRS=[".git" ".svn" ".hg"]<line_sep><def_stmt>filter_filenames filenames ignored_files=[".hgignore"]<block_start><for_stmt>filename filenames<block_start><if_stmt>filename<in>ignored_files<block_start><continue><block_end><if_stmt>any([filename.startswith(prefix)<for>prefix IGNORED_FILE_PREFIXES])<block_start><continue><block_end><if_stmt>any([filename.endswith(suffix)<for>suffix IGNORED_FILE_SUFFIXES])<block_start><continue><block_end><yield>filename<block_end><block_end><def_stmt>filter_dirnames dirnames<block_start><return>[dirname<for>dirname dirnames<if>dirname<not><in>IGNORED_DIRS]<block_end>
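# --- editorial usage sketch, added for clarity and not part of the original module; it assumes the filter_dirnames and filter_filenames helpers defined above and an arbitrary root path, and shows the intended os.walk pruning pattern <import_stmt>os<def_stmt>iter_tracked_files root<block_start><for_stmt>dirpath,dirnames,filenames os.walk(root)<block_start>dirnames[:]=filter_dirnames(dirnames)# prune ignored VCS directories in place so os.walk does not descend into them <for_stmt>filename filter_filenames(filenames)<block_start><yield>os.path.join(dirpath filename)<block_end><block_end><block_end>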
# Copyright (c) 2017 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_stmt>datetime<import_stmt>importlib<import_stmt>os<import_stmt>unittest<try_stmt><block_start><import_stmt>unittest.mock<as>mock<block_end><except_stmt>ImportError<block_start><import_stmt>mock<block_end><import_from_stmt>cloudbaseinit conf<as>cloudbaseinit_conf<import_from_stmt>cloudbaseinit exception<import_from_stmt>cloudbaseinit.plugins.common base<import_from_stmt>cloudbaseinit.tests testutils<line_sep>CONF=cloudbaseinit_conf.CONF<line_sep>MODPATH="cloudbaseinit.plugins.windows.azureguestagent"<class_stmt>AzureGuestAgentPluginTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.mock_wmi=mock.MagicMock()<line_sep>self._moves_mock=mock.MagicMock()<line_sep>patcher=mock.patch.dict("sys.modules" {"wmi":self.mock_wmi "six.moves":self._moves_mock})<line_sep>patcher.start()<line_sep>self.addCleanup(patcher.stop)<line_sep>self._winreg_mock=self._moves_mock.winreg<line_sep>self._azureguestagent=importlib.import_module(MODPATH)<line_sep>self._azureagentplugin=self._azureguestagent.AzureGuestAgentPlugin()<line_sep>self.snatcher=testutils.LogSnatcher(MODPATH)<block_end><def_stmt>test_check_delete_service self<block_start>mock_osutils=mock.Mock()<line_sep>mock_service_name=mock.sentinel.name<line_sep>self._azureagentplugin._check_delete_service(mock_osutils mock_service_name)<line_sep>mock_osutils.check_service_exists.assert_called_once_with(mock_service_name)<line_sep>mock_osutils.get_service_status.assert_called_once_with(mock_service_name)<line_sep>mock_osutils.stop_service.assert_called_once_with(mock_service_name wait=<true>)<line_sep>mock_osutils.delete_service.assert_called_once_with(mock_service_name)<block_end>@mock.patch(MODPATH+".AzureGuestAgentPlugin._check_delete_service")<def_stmt>test_remove_agent_services self mock_check_delete_service<block_start>mock_osutils=mock.Mock()<line_sep>expected_logs=["Stopping and removing any existing Azure guest "<concat>"agent services"]<with_stmt>self.snatcher<block_start>self._azureagentplugin._remove_agent_services(mock_osutils)<block_end>self.assertEqual(self.snatcher.output expected_logs)<line_sep>self.assertEqual(mock_check_delete_service.call_count 3)<block_end>@mock.patch("shutil.rmtree")@mock.patch("os.path.exists")@mock.patch("os.getenv")<def_stmt>test_remove_azure_dirs self mock_os_getenv mock_exists mock_rmtree<block_start>mock_rmtree.side_effect=(<none> Exception)<line_sep>mock_exists.return_value=<true><line_sep>mock_os_getenv.return_value="fake_path"<with_stmt>self.snatcher<block_start>self._azureagentplugin._remove_azure_dirs()<block_end>mock_os_getenv.assert_called_with("SystemDrive")<line_sep>self.assertEqual(mock_os_getenv.call_count 2)<line_sep>self.assertEqual(mock_exists.call_count 2)<line_sep>self.assertEqual(mock_rmtree.call_count 2)<block_end><def_stmt>test_set_registry_vm_type self<block_start>vm_type=mock.sentinel.vm<line_sep>key_name="SOFTWARE\\Microsoft\\Windows 
Azure"<line_sep>self._azureagentplugin._set_registry_vm_type(vm_type)<line_sep>key=self._winreg_mock.CreateKey.return_value.__enter__.return_value<line_sep>self._winreg_mock.CreateKey.assert_called_with(self._winreg_mock.HKEY_LOCAL_MACHINE key_name)<line_sep>self._winreg_mock.SetValueEx.assert_called_once_with(key "VMType" 0 self._winreg_mock.REG_SZ vm_type)<block_end><def_stmt>test_set_registry_ga_params self<block_start>fake_version=(1 2 3 4)<line_sep>fake_install_timestamp=datetime.datetime.now()<line_sep>key_name="SOFTWARE\\Microsoft\\GuestAgent"<line_sep>self._azureagentplugin._set_registry_ga_params(fake_version fake_install_timestamp)<line_sep>self._winreg_mock.CreateKey.assert_called_with(self._winreg_mock.HKEY_LOCAL_MACHINE key_name)<line_sep>self.assertEqual(self._winreg_mock.SetValueEx.call_count 2)<block_end>@mock.patch(MODPATH+".AzureGuestAgentPlugin._set_registry_ga_params")@mock.patch(MODPATH+".AzureGuestAgentPlugin._set_registry_vm_type")<def_stmt>test_configure_rd_agent self mock_set_registry_vm_type mock_set_registry_ga_params<block_start>mock_osutils=mock.Mock()<line_sep>fake_ga_path="C:\\"<line_sep>expected_rd_path=os.path.join(fake_ga_path self._azureguestagent.RDAGENT_FILENAME)<line_sep>expected_path=os.path.join(fake_ga_path "TransparentInstaller.dll")<line_sep>self._azureagentplugin._configure_rd_agent(mock_osutils fake_ga_path)<line_sep>mock_osutils.create_service.assert_called_once_with(self._azureguestagent.SERVICE_NAME_RDAGENT self._azureguestagent.SERVICE_NAME_RDAGENT expected_rd_path mock_osutils.SERVICE_START_MODE_MANUAL)<line_sep>mock_osutils.get_file_version.assert_called_once_with(expected_path)<line_sep>mock_set_registry_vm_type.assert_called_once_with()<block_end>@mock.patch(MODPATH+".AzureGuestAgentPlugin._run_logman")<def_stmt>test_stop_event_trace self mock_run_logman<block_start>mock_osutils=mock.Mock()<line_sep>fake_name=mock.sentinel.event_name<line_sep>res=self._azureagentplugin._stop_event_trace(mock_osutils fake_name)<line_sep>mock_run_logman.assert_called_once_with(mock_osutils "stop" fake_name <false>)<line_sep>self.assertIsNotNone(res)<block_end>@mock.patch(MODPATH+".AzureGuestAgentPlugin._run_logman")<def_stmt>test_delete_event_trace self mock_run_logman<block_start>mock_osutils=mock.Mock()<line_sep>fake_name=mock.sentinel.event_name<line_sep>res=self._azureagentplugin._delete_event_trace(mock_osutils fake_name)<line_sep>mock_run_logman.assert_called_once_with(mock_osutils "delete" fake_name)<line_sep>self.assertIsNotNone(res)<block_end><def_stmt>test_run_logman self<block_start>mock_osutils=mock.Mock()<line_sep>fake_action=mock.sentinel.action<line_sep>fake_name=mock.sentinel.cmd_name<line_sep>expected_args=["logman.exe" "-ets" fake_action fake_name]<line_sep>mock_osutils.execute_system32_process.return_value=(0 0 -1)<line_sep>self._azureagentplugin._run_logman(mock_osutils fake_action fake_name <true>)<line_sep>mock_osutils.execute_system32_process.assert_called_once_with(expected_args)<block_end>@mock.patch(MODPATH+".AzureGuestAgentPlugin._stop_event_trace")<def_stmt>test_stop_ga_event_traces self mock_stop_event_trace<block_start>mock_osutils=mock.Mock()<line_sep>expected_logs=["Stopping Azure guest agent event traces"]<with_stmt>self.snatcher<block_start>self._azureagentplugin._stop_ga_event_traces(mock_osutils)<block_end>self.assertEqual(mock_stop_event_trace.call_count 4)<line_sep>self.assertEqual(self.snatcher.output 
expected_logs)<block_end>@mock.patch(MODPATH+".AzureGuestAgentPlugin._delete_event_trace")<def_stmt>test_delete_ga_event_traces self mock_delete_event_trace<block_start>mock_osutils=mock.Mock()<line_sep>expected_logs=["Deleting Azure guest agent event traces"]<with_stmt>self.snatcher<block_start>self._azureagentplugin._delete_ga_event_traces(mock_osutils)<block_end>self.assertEqual(mock_delete_event_trace.call_count 2)<line_sep>self.assertEqual(self.snatcher.output expected_logs)<block_end>@mock.patch("os.path.exists")<def_stmt>_test_get_guest_agent_source_path self mock_exists drives=<none> exists=<false><block_start>mock_osutils=mock.Mock()<line_sep>mock_exists.return_value=exists<line_sep>mock_osutils.get_logical_drives.return_value=drives<if_stmt><not>exists<block_start>self.assertRaises(exception.CloudbaseInitException self._azureagentplugin._get_guest_agent_source_path mock_osutils)<line_sep><return><block_end>res=self._azureagentplugin._get_guest_agent_source_path(mock_osutils)<line_sep>self.assertIsNotNone(res)<block_end><def_stmt>test_get_guest_agent_source_path_no_agent self<block_start>self._test_get_guest_agent_source_path(drives=[])<block_end><def_stmt>test_get_guest_agent_source_path self<block_start>mock_drive="C:"<line_sep>self._test_get_guest_agent_source_path(drives=[mock_drive] exists=<true>)<block_end><def_stmt>_test_execute self provisioning_data=<none> expected_logs=<none><block_start>mock_service=mock.Mock()<line_sep>mock_sharedata=mock.Mock()<line_sep>expected_res=(base.PLUGIN_EXECUTION_DONE <false>)<line_sep>(mock_service.get_vm_agent_package_provisioning_data.return_value)=provisioning_data<if_stmt><not>provisioning_data<or><not>provisioning_data.get("provision")<block_start><with_stmt>self.snatcher<block_start>res=self._azureagentplugin.execute(mock_service mock_sharedata)<block_end>(mock_service.get_vm_agent_package_provisioning_data.assert_called_once_with())<line_sep>self.assertEqual(res expected_res)<line_sep>self.assertEqual(self.snatcher.output expected_logs)<line_sep><return><block_end><block_end><def_stmt>test_execute_no_data self<block_start>expected_logs=["Azure guest agent provisioning data not present"]<line_sep>self._test_execute(expected_logs=expected_logs)<block_end><def_stmt>test_execute_no_provision self<block_start>mock_data={"provision":<none>}<line_sep>expected_logs=["Skipping Azure guest agent provisioning "<concat>"as by metadata request"]<line_sep>self._test_execute(provisioning_data=mock_data expected_logs=expected_logs)<block_end><def_stmt>test_get_os_requirements self<block_start>expected_res=('win32' (6 1))<line_sep>res=self._azureagentplugin.get_os_requirements()<line_sep>self.assertEqual(res expected_res)<block_end><block_end>
<import_from_stmt>lldbsuite.test lldbinline<import_from_stmt>lldbsuite.test decorators<line_sep>lldbinline.MakeInlineTest(__file__ globals() lldbinline.expectedFailureAll(oslist=["windows" "linux" "netbsd"]))<line_sep>
"""A simple project that is compatible with both 'brotli' C bindings and 'brotlicffi' CFFI bindings """<import_stmt>sys<try_stmt><block_start><import_stmt>brotlicffi<as>brotli<block_end><except_stmt>ImportError<block_start><import_stmt>brotli<block_end><def_stmt>main <block_start>data=sys.argv[1].encode("utf-8")<line_sep>print(f"Compressing data: {data}")<line_sep>compressor=brotli.Compressor(mode=brotli.MODE_TEXT)<line_sep>compressed=compressor.process(data)+compressor.finish()<line_sep>print(f"Compressed data: {compressed}")<line_sep>decompressor=brotli.Decompressor()<line_sep>decompressed=decompressor.process(compressed)+decompressor.finish()<line_sep>print(f"Decompressed data: {decompressed}")<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>torch<class_stmt>NoiseManager<block_start><def_stmt>__init__ self noise device trace_model=<false><block_start>self.device=device<line_sep>self.noise_lut={}# fixed noise tensors keyed by spatial size; only filled when every entry is provided <if_stmt>noise<is><not><none><and><none><not><in>noise<block_start><for_stmt>n noise<block_start>self.noise_lut[n.size(-1)]=n<block_end><block_end>self.trace_model=trace_model<block_end><def_stmt>__call__ self size b=1<block_start><if_stmt>self.trace_model<block_start><return><none><if>b<eq>1<else>[<none>]<times>b<block_end><if_stmt>size<in>self.noise_lut<block_start><return>self.noise_lut[size]<block_end><else_stmt><block_start><return>torch.randn(b 1 size size).to(self.device)<block_end><block_end><block_end>
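# --- editorial usage sketch, added for clarity and not part of the original source; the sizes, batch and device below are illustrative assumptions: it demonstrates the two lookup paths of NoiseManager above <def_stmt>demo_noise_lookup <block_start>fixed=[torch.randn(1 1 4 4) torch.randn(1 1 8 8)]<line_sep>manager=NoiseManager(fixed device="cpu")<line_sep>cached=manager(4)# 4 is in the lookup table, so the stored tensor is returned fresh=manager(16 b=2)# 16 is not cached, so a new (2, 1, 16, 16) tensor is drawn on the device <return>cached fresh<block_end>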
### tensorflow==2.2.0 <import_stmt>tensorflow<as>tf<line_sep># Weight Quantization - Input/Output=float32 converter=tf.lite.TFLiteConverter.from_saved_model('saved_model_mosaic')<line_sep>converter.optimizations=[tf.lite.Optimize.OPTIMIZE_FOR_SIZE]<line_sep>converter.target_spec.supported_ops=[tf.lite.OpsSet.TFLITE_BUILTINS tf.lite.OpsSet.SELECT_TF_OPS]<line_sep>tflite_quant_model=converter.convert()<with_stmt>open('mosaic_224_weight_quant.tflite' 'wb')<as>w<block_start>w.write(tflite_quant_model)<block_end>print("Weight Quantization complete! - mosaic_224_weight_quant.tflite")<line_sep>
<import_stmt>os<import_stmt>pytest<import_stmt>dcos_installer.config_util<line_sep>@pytest.fixture(autouse=<true>)<def_stmt>mock_installer_latest_complete_artifact monkeypatch<block_start>monkeypatch.setattr(dcos_installer.config_util 'installer_latest_complete_artifact' <lambda>_:{'bootstrap':os.getenv('BOOTSTRAP_ID' '12345') 'packages':[]} )<block_end>
<import_stmt>hashlib<import_from_stmt>django template<line_sep>register=template.Library()<line_sep>@register.filter<def_stmt>to_gravatar_url email<block_start><return>('https://gravatar.com/avatar/%s?d=retro'%hashlib.md5((email<or>'').strip().lower().encode('utf-8')).hexdigest())<block_end>
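# --- editorial usage note, added for clarity and not part of the original source; the email address is an illustrative assumption: in a Django template the filter above is applied as {{ user.email|to_gravatar_url }}, and it can also be called directly <def_stmt>demo_gravatar_url <block_start><return>to_gravatar_url("user@example.com")<block_end>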
<import_from_future_stmt> absolute_import<import_stmt>click<import_stmt>shub<line_sep>@click.command(help="Show shub version")<def_stmt>cli <block_start>click.echo(shub.__version__)<block_end>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_stmt>aliyunsdkcore.request RpcRequest<import_from_stmt>aliyunsdkgdb.endpoint endpoint_data<class_stmt>CreateDBInstanceRequest(RpcRequest)<block_start><def_stmt>__init__ self<block_start>RpcRequest.__init__(self 'gdb' '2019-09-03' 'CreateDBInstance' 'gds')<line_sep>self.set_method('POST')<if_stmt>hasattr(self "endpoint_map")<block_start>setattr(self "endpoint_map" endpoint_data.getEndpointMap())<block_end><if_stmt>hasattr(self "endpoint_regional")<block_start>setattr(self "endpoint_regional" endpoint_data.getEndpointRegional())<block_end><block_end><def_stmt>get_ResourceOwnerId self<block_start><return>self.get_query_params().get('ResourceOwnerId')<block_end><def_stmt>set_ResourceOwnerId self ResourceOwnerId<block_start>self.add_query_param('ResourceOwnerId' ResourceOwnerId)<block_end><def_stmt>get_ClientToken self<block_start><return>self.get_query_params().get('ClientToken')<block_end><def_stmt>set_ClientToken self ClientToken<block_start>self.add_query_param('ClientToken' ClientToken)<block_end><def_stmt>get_DBInstanceCategory self<block_start><return>self.get_query_params().get('DBInstanceCategory')<block_end><def_stmt>set_DBInstanceCategory self DBInstanceCategory<block_start>self.add_query_param('DBInstanceCategory' DBInstanceCategory)<block_end><def_stmt>get_DBNodeStorageType self<block_start><return>self.get_query_params().get('DBNodeStorageType')<block_end><def_stmt>set_DBNodeStorageType self DBNodeStorageType<block_start>self.add_query_param('DBNodeStorageType' DBNodeStorageType)<block_end><def_stmt>get_DBInstanceDescription self<block_start><return>self.get_query_params().get('DBInstanceDescription')<block_end><def_stmt>set_DBInstanceDescription self DBInstanceDescription<block_start>self.add_query_param('DBInstanceDescription' DBInstanceDescription)<block_end><def_stmt>get_AutoRenewPeriod self<block_start><return>self.get_query_params().get('AutoRenewPeriod')<block_end><def_stmt>set_AutoRenewPeriod self AutoRenewPeriod<block_start>self.add_query_param('AutoRenewPeriod' AutoRenewPeriod)<block_end><def_stmt>get_Period self<block_start><return>self.get_query_params().get('Period')<block_end><def_stmt>set_Period self Period<block_start>self.add_query_param('Period' Period)<block_end><def_stmt>get_ResourceOwnerAccount self<block_start><return>self.get_query_params().get('ResourceOwnerAccount')<block_end><def_stmt>set_ResourceOwnerAccount self ResourceOwnerAccount<block_start>self.add_query_param('ResourceOwnerAccount' ResourceOwnerAccount)<block_end><def_stmt>get_OwnerAccount self<block_start><return>self.get_query_params().get('OwnerAccount')<block_end><def_stmt>set_OwnerAccount self OwnerAccount<block_start>self.add_query_param('OwnerAccount' OwnerAccount)<block_end><def_stmt>get_OwnerId 
self<block_start><return>self.get_query_params().get('OwnerId')<block_end><def_stmt>set_OwnerId self OwnerId<block_start>self.add_query_param('OwnerId' OwnerId)<block_end><def_stmt>get_UsedTime self<block_start><return>self.get_query_params().get('UsedTime')<block_end><def_stmt>set_UsedTime self UsedTime<block_start>self.add_query_param('UsedTime' UsedTime)<block_end><def_stmt>get_DBInstanceClass self<block_start><return>self.get_query_params().get('DBInstanceClass')<block_end><def_stmt>set_DBInstanceClass self DBInstanceClass<block_start>self.add_query_param('DBInstanceClass' DBInstanceClass)<block_end><def_stmt>get_VSwitchId self<block_start><return>self.get_query_params().get('VSwitchId')<block_end><def_stmt>set_VSwitchId self VSwitchId<block_start>self.add_query_param('VSwitchId' VSwitchId)<block_end><def_stmt>get_SecurityIPList self<block_start><return>self.get_query_params().get('SecurityIPList')<block_end><def_stmt>set_SecurityIPList self SecurityIPList<block_start>self.add_query_param('SecurityIPList' SecurityIPList)<block_end><def_stmt>get_DBNodeStorage self<block_start><return>self.get_query_params().get('DBNodeStorage')<block_end><def_stmt>set_DBNodeStorage self DBNodeStorage<block_start>self.add_query_param('DBNodeStorage' DBNodeStorage)<block_end><def_stmt>get_DBInstanceNetworkType self<block_start><return>self.get_query_params().get('DBInstanceNetworkType')<block_end><def_stmt>set_DBInstanceNetworkType self DBInstanceNetworkType<block_start>self.add_query_param('DBInstanceNetworkType' DBInstanceNetworkType)<block_end><def_stmt>get_AutoRenew self<block_start><return>self.get_query_params().get('AutoRenew')<block_end><def_stmt>set_AutoRenew self AutoRenew<block_start>self.add_query_param('AutoRenew' AutoRenew)<block_end><def_stmt>get_DBInstanceVersion self<block_start><return>self.get_query_params().get('DBInstanceVersion')<block_end><def_stmt>set_DBInstanceVersion self DBInstanceVersion<block_start>self.add_query_param('DBInstanceVersion' DBInstanceVersion)<block_end><def_stmt>get_VPCId self<block_start><return>self.get_query_params().get('VPCId')<block_end><def_stmt>set_VPCId self VPCId<block_start>self.add_query_param('VPCId' VPCId)<block_end><def_stmt>get_ZoneId self<block_start><return>self.get_query_params().get('ZoneId')<block_end><def_stmt>set_ZoneId self ZoneId<block_start>self.add_query_param('ZoneId' ZoneId)<block_end><def_stmt>get_PayType self<block_start><return>self.get_query_params().get('PayType')<block_end><def_stmt>set_PayType self PayType<block_start>self.add_query_param('PayType' PayType)<block_end><block_end>
# -*- coding: utf-8 -*- """ Execution module to handle MetalK8s sysctl. """<import_stmt>configparser<import_stmt>pathlib<import_from_stmt>salt.exceptions CommandExecutionError<import_stmt>salt.utils.files<line_sep>__virtualname__="metalk8s_sysctl"<line_sep># Order in this list defines the precedence SYSCTL_CFG_DIRECTORIES=["/run/sysctl.d" "/etc/sysctl.d" "/usr/local/lib/sysctl.d" "/usr/lib/sysctl.d" "/lib/sysctl.d" ]<line_sep># This file is applied last no matter what SYSCTL_DEFAULT_CFG="/etc/sysctl.conf"<def_stmt>__virtual__ <block_start><return>__virtualname__<block_end><def_stmt>_get_sysctl_files config<block_start>""" Return all the sysctl configuration files ordered as they are read by the system. Inject the configuration file passed in argument `config` in this list, in case this file does not exist yet. If the `config` file is not in an authorized path (see `SYSCTL_FILE_GLOBS` and `SYSCTL_DEFAULT_CFG`) or is overwritten by a file with the same name but higher precedence, it is ignored as the system will not take care of it anyway. """<line_sep>config_path=pathlib.Path(config).resolve()<line_sep>files={}<for_stmt>directory SYSCTL_CFG_DIRECTORIES<block_start>path=pathlib.Path(directory)<if_stmt>path<eq>config_path.parent<block_start>files.setdefault(config_path.name str(config_path))<block_end><for_stmt>cfg path.glob("*.conf")<block_start>files.setdefault(cfg.name str(cfg))<block_end><block_end>sorted_files=[files[name]<for>name sorted(files)]<line_sep>sorted_files.append(SYSCTL_DEFAULT_CFG)<line_sep><return>sorted_files<block_end><def_stmt>has_precedence name value config strict=<false><block_start>""" Read all sysctl configuration file to check if the passed `name` and `value` are not overwritten by an already existing sysctl configuration file. If `strict` is set, check that the final value comes from the passed `config` and not another sysctl configuration file (even if the value is equal to `value`). """<line_sep>sysctl_files=_get_sysctl_files(config)<line_sep># Ignore files before the `config` one. <try_stmt><block_start>sysctl_files=sysctl_files[sysctl_files.index(config)+1:]<block_end><except_stmt>ValueError# If the file is not in the list, it means it's overwritten by an # other sysctl configuration file with higher precedence. 
<block_start>config_name=pathlib.PurePath(config).name<for_stmt>sysctl_file sysctl_files<block_start>sysctl_name=pathlib.PurePath(sysctl_file).name<if_stmt>sysctl_name<eq>config_name<block_start><raise>CommandExecutionError(# pylint: disable=raise-missing-from "'{0}' has a higher precedence and overrides '{1}'".format(sysctl_file config))<block_end><block_end># The target file is not in a directory checked by the system <raise>CommandExecutionError(# pylint: disable=raise-missing-from "{0} is not a correct path for a sysctl configuration "<concat>"file, please use one of the following:\n- {1}".format(config "\n- ".join(SYSCTL_CFG_DIRECTORIES)))<block_end>parser=configparser.ConfigParser(interpolation=<none>)<line_sep>epured_value=" ".join(str(value).split())<for_stmt>sysctl_file sysctl_files<block_start><with_stmt>salt.utils.files.fopen(sysctl_file "r")<as>sysctl_fd<block_start>parser.read_file(["[global]" *sysctl_fd] source=sysctl_file)<block_end>sysctl=dict(parser.items("global"))<line_sep>parser.remove_section("global")<if_stmt>name<in>sysctl<and>(strict<or>" ".join(sysctl[name].split())<ne>epured_value)<block_start><raise>CommandExecutionError("'{0}' redefines '{1}' with value '{2}'".format(sysctl_file name sysctl[name]))<block_end><block_end><block_end>
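A hedged call sketch for the has_precedence function above; the sysctl key, value, and target file are placeholders, not taken from the module itself, and the call only makes sense on a MetalK8s host because the function reads the real sysctl configuration files on disk:

# Placeholder key/value/target file; raises salt.exceptions.CommandExecutionError
# if another sysctl file redefines the key with higher precedence (or, with
# strict=True, if the final value simply comes from a different file).
has_precedence(
    "net.ipv4.ip_forward",
    1,
    "/etc/sysctl.d/60-metalk8s.conf",
    strict=True,
)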
<import_stmt>os<class_stmt>Package<block_start><def_stmt>__init__ self<block_start>self.root_dir=""<line_sep>self.name=""<line_sep>self.config=""<line_sep>self.commands=[]<line_sep>self.views=[]<line_sep>self.migrations=[]<line_sep>self.controller_locations=[]<line_sep>self.routes=[]<line_sep>self.assets=[]<block_end><def_stmt>_build_path self rel_path<block_start><return>os.path.join(self.root_dir rel_path)<block_end><def_stmt>add_config self config_path<block_start>self.config=self._build_path(config_path)<line_sep><return>self<block_end><def_stmt>add_views self *locations<block_start><for_stmt>location locations<block_start>self.views.append(self._build_path(location))<block_end><return>self<block_end><def_stmt>add_migrations self *migrations<block_start><for_stmt>migration migrations<block_start>self.migrations.append(self._build_path(migration))<block_end><return>self<block_end><def_stmt>add_routes self *routes<block_start><for_stmt>route routes<block_start>self.routes.append(self._build_path(route))<block_end><return>self<block_end><def_stmt>add_assets self *assets<block_start><for_stmt>asset assets<block_start>self.assets.append(self._build_path(asset))<block_end><return>self<block_end><def_stmt>add_controller_locations self *controller_locations<block_start><for_stmt>loc controller_locations<block_start>self.controller_locations.append(self._build_path(loc))<block_end><return>self<block_end><block_end>
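The Package class above is a fluent builder: each add_* method resolves its relative paths against root_dir and returns self, so registrations can be chained. A minimal usage sketch; the root directory and file names are made up for illustration:

package = Package()
package.root_dir = "/srv/blog_pkg"   # every relative path below is joined onto this
package.name = "blog"

(package
    .add_config("config/blog.py")
    .add_views("templates")
    .add_migrations("migrations/create_posts_table.py")
    .add_routes("routes/web.py")
    .add_assets("static"))

print(package.views)   # ['/srv/blog_pkg/templates']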
<import_stmt>pylab<def_stmt>plot_episode episode classes_first=<true><block_start>sample_set=episode["support_set"].cpu()<line_sep>query_set=episode["query_set"].cpu()<line_sep>support_size=episode["support_size"]<line_sep>query_size=episode["query_size"]<if_stmt><not>classes_first<block_start>sample_set=sample_set.permute(1 0 2 3 4)<line_sep>query_set=query_set.permute(1 0 2 3 4)<block_end>n,support_size,c,h,w=sample_set.size()<line_sep>n,query_size,c,h,w=query_set.size()<line_sep>sample_set=((sample_set/2+0.5)<times>255).numpy().astype('uint8').transpose((0 3 1 4 2)).reshape((n<times>h support_size<times>w c))<line_sep>pylab.imsave('support_set.png' sample_set)<line_sep>query_set=((query_set/2+0.5)<times>255).numpy().astype('uint8').transpose((0 3 1 4 2)).reshape((n<times>h query_size<times>w c))<line_sep>pylab.imsave('query_set.png' query_set)<line_sep># pylab.imshow(query_set) # pylab.title("query_set") # pylab.show() # pylab.savefig('query_set.png') <block_end>
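plot_episode expects 5-D tensors shaped (classes, shots, C, H, W) when classes_first is True, with images normalized to roughly [-1, 1]. A self-contained sketch with random data; the episode sizes are arbitrary:

import torch

episode = {
    # 5-way episode, 3 support and 15 query images per class, 3x32x32 pixels
    "support_set": torch.rand(5, 3, 3, 32, 32) * 2 - 1,
    "query_set": torch.rand(5, 15, 3, 32, 32) * 2 - 1,
    "support_size": 3,
    "query_size": 15,
}
plot_episode(episode)   # writes support_set.png and query_set.png to the working directory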
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>ctypes<import_stmt>numpy<import_from_stmt>nidaqmx._lib lib_importer wrapped_ndpointer ctypes_byte_str<import_from_stmt>nidaqmx.system.physical_channel PhysicalChannel<import_from_stmt>nidaqmx.errors check_for_error is_string_buffer_too_small is_array_buffer_too_small <import_from_stmt>nidaqmx._task_modules.triggering.arm_start_trigger ArmStartTrigger<import_from_stmt>nidaqmx._task_modules.triggering.handshake_trigger HandshakeTrigger<import_from_stmt>nidaqmx._task_modules.triggering.pause_trigger PauseTrigger<import_from_stmt>nidaqmx._task_modules.triggering.reference_trigger ReferenceTrigger<import_from_stmt>nidaqmx._task_modules.triggering.start_trigger StartTrigger<import_from_stmt>nidaqmx.constants SyncType <class_stmt>Triggers(object)<block_start>""" Represents the trigger configurations for a DAQmx task. """<def_stmt>__init__ self task_handle<block_start>self._handle=task_handle<line_sep>self._arm_start_trigger=ArmStartTrigger(self._handle)<line_sep>self._handshake_trigger=HandshakeTrigger(self._handle)<line_sep>self._pause_trigger=PauseTrigger(self._handle)<line_sep>self._reference_trigger=ReferenceTrigger(self._handle)<line_sep>self._start_trigger=StartTrigger(self._handle)<block_end>@property<def_stmt>arm_start_trigger self<block_start>""" :class:`nidaqmx._task_modules.triggering.arm_start_trigger.ArmStartTrigger`: Gets the arm start trigger configurations for the task. """<line_sep><return>self._arm_start_trigger<block_end>@property<def_stmt>handshake_trigger self<block_start>""" :class:`nidaqmx._task_modules.triggering.handshake_trigger.HandshakeTrigger`: Gets the handshake trigger configurations for the task. """<line_sep><return>self._handshake_trigger<block_end>@property<def_stmt>pause_trigger self<block_start>""" :class:`nidaqmx._task_modules.triggering.pause_trigger.PauseTrigger`: Gets the pause trigger configurations for the task. """<line_sep><return>self._pause_trigger<block_end>@property<def_stmt>reference_trigger self<block_start>""" :class:`nidaqmx._task_modules.triggering.reference_trigger.ReferenceTrigger`: Gets the reference trigger configurations for the task. """<line_sep><return>self._reference_trigger<block_end>@property<def_stmt>start_trigger self<block_start>""" :class:`nidaqmx._task_modules.triggering.start_trigger.StartTrigger`: Gets the start trigger configurations for the task. """<line_sep><return>self._start_trigger<block_end>@property<def_stmt>sync_type self<block_start>""" :class:`nidaqmx.constants.SyncType`: Specifies the role of the device in a synchronized system. Setting this value to **SyncType.MASTER** or **SyncType.SLAVE** enables trigger skew correction. If you enable trigger skew correction, set this property to **SyncType.MASTER** on only one device, and set this property to **SyncType.SLAVE** on the other devices. 
"""<line_sep>val=ctypes.c_int()<line_sep>cfunc=lib_importer.windll.DAQmxGetTriggerSyncType<if_stmt>cfunc.argtypes<is><none><block_start><with_stmt>cfunc.arglock<block_start><if_stmt>cfunc.argtypes<is><none><block_start>cfunc.argtypes=[lib_importer.task_handle ctypes.POINTER(ctypes.c_int)]<block_end><block_end><block_end>error_code=cfunc(self._handle ctypes.byref(val))<line_sep>check_for_error(error_code)<line_sep><return>SyncType(val.value)<block_end>@sync_type.setter<def_stmt>sync_type self val<block_start>val=val.value<line_sep>cfunc=lib_importer.windll.DAQmxSetTriggerSyncType<if_stmt>cfunc.argtypes<is><none><block_start><with_stmt>cfunc.arglock<block_start><if_stmt>cfunc.argtypes<is><none><block_start>cfunc.argtypes=[lib_importer.task_handle ctypes.c_int]<block_end><block_end><block_end>error_code=cfunc(self._handle val)<line_sep>check_for_error(error_code)<block_end>@sync_type.deleter<def_stmt>sync_type self<block_start>cfunc=lib_importer.windll.DAQmxResetTriggerSyncType<if_stmt>cfunc.argtypes<is><none><block_start><with_stmt>cfunc.arglock<block_start><if_stmt>cfunc.argtypes<is><none><block_start>cfunc.argtypes=[lib_importer.task_handle]<block_end><block_end><block_end>error_code=cfunc(self._handle)<line_sep>check_for_error(error_code)<block_end><block_end>
"""An evolutionary package for all common opytimizer modules. It contains implementations of miscellaneous-based optimizers. """<import_from_stmt>opytimizer.optimizers.misc.aoa AOA<import_from_stmt>opytimizer.optimizers.misc.cem CEM<import_from_stmt>opytimizer.optimizers.misc.doa DOA<import_from_stmt>opytimizer.optimizers.misc.gs GS<import_from_stmt>opytimizer.optimizers.misc.hc HC<line_sep>
<import_from_stmt>.beam_search_generator BeamSearchGenerator<import_from_stmt>.transformer_beam_search_generator TransformerBeamSearchGenerator<line_sep>
# Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Layout to monitor motors."""<import_from_stmt>makani.gs.monitor2.apps.layout base<import_from_stmt>makani.gs.monitor2.apps.plugins common<import_from_stmt>makani.gs.monitor2.apps.plugins.indicators node_status<line_sep>_WING_TMS570_NODES=common.WingTms570Nodes()<class_stmt>StatusLayout(base.BaseLayout)<block_start>"""Layout to monitor motors."""<line_sep>_NAME='Status'<line_sep>_DESIRED_VIEW_COLS=12<def_stmt>Initialize self<block_start>self._AddIndicators('Network' [node_status.TetherNodeNetworkIndicator(node_name node_name node_name<not><in>common.NETWORK_STATUS_NODES_TO_EXCLUDE)<for>node_name _WING_TMS570_NODES] {'cols':3})<line_sep>self._AddIndicators('Failures' [node_status.TetherNodeFailureIndicator(node_name node_name)<for>node_name _WING_TMS570_NODES] {'cols':2})<line_sep>self._AddIndicators('Power' [node_status.TetherNodePowerIndicator(node_name node_name)<for>node_name _WING_TMS570_NODES] {'cols':2})<line_sep>self._AddIndicators('Temperature [C]' [node_status.TetherNodeTempIndicator(node_name node_name)<for>node_name _WING_TMS570_NODES] {'cols':2})<line_sep>self._AddIndicators('Humidity' [node_status.TetherNodeHumidityIndicator(node_name node_name)<for>node_name _WING_TMS570_NODES] {'cols':2})<block_end><block_end>
"""Probabilistic linear solvers. Iterative probabilistic numerical methods solving linear systems :math:`Ax = b`. """<class_stmt>ProbabilisticLinearSolver<block_start>r"""Compose a custom probabilistic linear solver. Class implementing probabilistic linear solvers. Such (iterative) solvers infer solutions to problems of the form .. math:: Ax=b, where :math:`A \in \mathbb{R}^{n \times n}` and :math:`b \in \mathbb{R}^{n}`. They return a probability measure which quantifies uncertainty in the output arising from finite computational resources or stochastic input. This class unifies and generalizes probabilistic linear solvers as described in the literature. [1]_ [2]_ [3]_ [4]_ Parameters ---------- References ---------- .. [1] <NAME>., Probabilistic Interpretation of Linear Solvers, *SIAM Journal on Optimization*, 2015, 25, 234-260 .. [2] <NAME>. et al., A Bayesian Conjugate Gradient Method, *Bayesian Analysis*, 2019, 14, 937-1012 .. [3] <NAME> al., Probabilistic Linear Solvers: A Unifying View, *Statistics and Computing*, 2019 .. [4] <NAME>. and <NAME>., Probabilistic Linear Solvers for Machine Learning, *Advances in Neural Information Processing Systems (NeurIPS)*, 2020 See Also -------- problinsolve : Solve linear systems in a Bayesian framework. bayescg : Solve linear systems with prior information on the solution. Examples -------- """<block_end>
<def_stmt>miniPeaks nums<block_start>result=[]<line_sep>left=0<line_sep>right=0<for_stmt>i range(1 len(nums)-1)<block_start>left=nums[i-1]<line_sep>right=nums[i+1]<if_stmt>nums[i]<g>left<and>nums[i]<g>right<block_start>result.append(nums[i])<block_end><block_end><return>result<block_end># Time Complexity : O(n) # Space Complexity : O(m), # n = number of elements # m = number of peak elements
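A quick sanity check of miniPeaks; it collects strict local maxima only, so endpoints and plateaus never count:

print(miniPeaks([1, 3, 2, 5, 4, 6]))   # [3, 5]  (6 is the last element, so it is skipped)
print(miniPeaks([4, 4, 4]))            # []      (equal neighbours are not strict peaks)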
""" Usage: kungfu-run -q -np 4 python3 -m kungfu.tensorflow.v1.benchmarks --method CPU kungfu-run -q -np 4 python3 -m kungfu.tensorflow.v1.benchmarks --method NCCL kungfu-run -q -np 4 python3 -m kungfu.tensorflow.v1.benchmarks --method NCCL+CPU mpirun -np 4 python3 -m kungfu.tensorflow.v1.benchmarks --method HOROVOD """<import_stmt>argparse<import_stmt>os<import_stmt>sys<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>kungfu._utils measure one_based_range<import_from_stmt>kungfu.python _get_cuda_index<import_from_stmt>kungfu.tensorflow.ops current_cluster_size current_rank group_all_reduce group_nccl_all_reduce <import_from_stmt>kungfu.tensorflow.ops.collective group_hierarchical_nccl_all_reduce<import_from_stmt>kungfu.tensorflow.v1.benchmarks model_sizes<import_from_stmt>kungfu.tensorflow.v1.helpers.utils show_rate show_size<import_from_stmt>tensorflow.python.util deprecation<line_sep>deprecation._PRINT_DEPRECATION_WARNINGS=<false><def_stmt>_tensor_size t<block_start><return>t.shape.num_elements()<times>t.dtype.size<block_end><def_stmt>hvd_init <block_start><import_stmt>horovod.tensorflow<as>hvd<line_sep>hvd.init()<block_end><def_stmt>hvd_group_all_reduce ts<block_start><import_stmt>horovod.tensorflow<as>hvd<line_sep><return>[hvd.allreduce(t average=<false>)<for>t ts]<block_end><def_stmt>get_cluster_size method<block_start><if_stmt>method<eq>'HOROVOD'<block_start><import_stmt>horovod.tensorflow<as>hvd<line_sep><return>hvd.size()<block_end><else_stmt><block_start><return>current_cluster_size()<block_end><block_end><def_stmt>get_rank method<block_start><if_stmt>method<eq>'HOROVOD'<block_start><import_stmt>horovod.tensorflow<as>hvd<line_sep><return>hvd.rank()<block_end><else_stmt><block_start><return>current_rank()<block_end><block_end>_group_all_reduce_func={'CPU':group_all_reduce 'NCCL':group_nccl_all_reduce 'NCCL+CPU':group_hierarchical_nccl_all_reduce 'HOROVOD':hvd_group_all_reduce }<line_sep>_model_sizes={'ResNet50':model_sizes.resnet50_imagenet 'VGG16':model_sizes.vgg16_imagenet 'BERT':model_sizes.bert }<def_stmt>_config method<block_start>config=tf.ConfigProto()<line_sep>config.gpu_options.allow_growth=<true><if_stmt>method<eq>'HOROVOD'<block_start><import_stmt>horovod.tensorflow<as>hvd<line_sep>config.gpu_options.visible_device_list=str(hvd.local_rank())<block_end><else_stmt><block_start>config.gpu_options.visible_device_list=str(_get_cuda_index())<block_end><return>config<block_end><def_stmt>_rank method<block_start><if_stmt>method<eq>'HOROVOD'<block_start><import_stmt>horovod.tensorflow<as>hvd<line_sep><return>hvd.rank()<block_end><else_stmt><block_start><return>current_rank()<block_end><block_end><def_stmt>parse_args <block_start>p=argparse.ArgumentParser(description='Perf Benchmarks.')<line_sep>p.add_argument('--model' type=str default='ResNet50' help='ResNet50 | VGG16 | BERT')<line_sep>p.add_argument('--method' type=str default='CPU' help='CPU | NCCL | HOROVOD')<line_sep>p.add_argument('--fuse' action='store_true' default=<false> help='')<line_sep>p.add_argument('--max-count' type=int default=0 help='max grad count')<line_sep>p.add_argument('--steps' type=int default=10 help='number of steps to run')<line_sep>p.add_argument('--warmup-steps' type=int default=5 help='number of warmup steps')<line_sep><return>p.parse_args()<block_end><def_stmt>log_detailed_result value error attrs<block_start><import_stmt>json<line_sep>attr_str=json.dumps(attrs separators=(',' ':'))<line_sep># grep -o RESULT.* *.log unit='GiB/s'<line_sep>print('RESULT: %f +-%f (%s) %s'%(value 
error unit attr_str))<block_end><def_stmt>log_final_result values args<block_start>attrs={'method':args.method 'np':get_cluster_size(args.method) 'model':args.model 'fuse':args.fuse }<line_sep>values=np.array(values)<if_stmt>args.method<ne>'HOROVOD'<block_start>attrs['strategy']=os.getenv('KUNGFU_ALLREDUCE_STRATEGY')<line_sep>attrs['nvlink']=os.getenv('KUNGFU_ALLOW_NVLINK')<block_end>log_detailed_result(values.mean() 1.96<times>values.std() attrs)<block_end><def_stmt>all_reduce_benchmark sizes dtype args<block_start>rank=_rank(args.method)<def_stmt>log msg<block_start><if_stmt>rank<eq>0<block_start>print(msg)<block_end><block_end>xs=[tf.Variable(tf.ones([n] dtype))<for>n sizes]<line_sep>tot_size=sum(_tensor_size(x)<for>x xs)<line_sep>np=get_cluster_size(args.method)<line_sep>multiplier=4<times>(np-1)<line_sep>log('all reduce %d tensors of total size: %s among %d peers, using %s'%(len(sizes) show_size(tot_size) np args.method))<line_sep>ys=_group_all_reduce_func[args.method](xs)<line_sep>init=tf.global_variables_initializer()<line_sep>values=[]<with_stmt>tf.Session(config=_config(args.method))<as>sess<block_start>duration,_=measure(<lambda>:sess.run(init))<line_sep>log('tensorflow init took %.fs'%(duration))<for_stmt>step one_based_range(args.warmup_steps)<block_start>duration,_=measure(<lambda>:sess.run(ys))<line_sep>log('warmup step %d, took %.2fs, equivalent data rate: %s'%(step duration show_rate(tot_size<times>multiplier duration)))<block_end><for_stmt>step one_based_range(args.steps)<block_start>duration,_=measure(<lambda>:sess.run(ys))<line_sep>gi=1024<times>1024<times>1024<line_sep>values.append(tot_size<times>multiplier/gi/duration)<line_sep>log('step %d, took %.2fs, equivalent data rate: %s'%(step duration show_rate(tot_size<times>multiplier duration)))<block_end><block_end><if_stmt>get_rank(args.method)<eq>0<block_start>log_final_result(values args)<block_end><block_end><def_stmt>main _<block_start>args=parse_args()<if_stmt>args.method<eq>'HOROVOD'<block_start>hvd_init()<block_end>dtype=tf.float32<line_sep>sizes=_model_sizes[args.model]<if_stmt>args.fuse<block_start>sizes=[sum(sizes)]<block_end><if_stmt>args.max_count<g>0<and>len(sizes)<g>args.max_count<block_start>sizes=sizes[:args.max_count]<block_end>all_reduce_benchmark(sizes dtype args)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main(sys.argv)<block_end>
"""A10 support."""<import_from_stmt>netmiko.cisco_base_connection CiscoSSHConnection<class_stmt>A10SSH(CiscoSSHConnection)<block_start>"""A10 support."""<def_stmt>session_preparation self<arrow><none><block_start>"""A10 requires to be enable mode to disable paging."""<line_sep>self._test_channel_read(pattern=r"[>#]")<line_sep>self.set_base_prompt()<line_sep>self.enable()<line_sep># terminal width ill not do anything without A10 specific command # self.set_terminal_width() self.disable_paging(command="terminal length 0")<block_end><def_stmt>save_config self cmd:str="" confirm:bool=<false> confirm_response:str=""<arrow>str<block_start>"""Not Implemented"""<line_sep><raise>NotImplementedError<block_end><block_end>
<import_stmt>typing<def_stmt>_is_prime n:int<arrow>bool<block_start>''' Reference: <NAME> and <NAME>, Fast Primality Testing for Integers That Fit into a Machine Word '''<if_stmt>n<le>1<block_start><return><false><block_end><if_stmt>n<eq>2<or>n<eq>7<or>n<eq>61<block_start><return><true><block_end><if_stmt>n%2<eq>0<block_start><return><false><block_end>d=n-1<while_stmt>d%2<eq>0<block_start>d<augfloordiv>2<block_end><for_stmt>a (2 7 61)<block_start>t=d<line_sep>y=pow(a t n)<while_stmt>t<ne>n-1<and>y<ne>1<and>y<ne>n-1<block_start>y=y<times>y%n<line_sep>t<auglshift>1<block_end><if_stmt>y<ne>n-1<and>t%2<eq>0<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>_inv_gcd a:int b:int<arrow>typing.Tuple[int int]<block_start>a<augmod>b<if_stmt>a<eq>0<block_start><return>(b 0)<block_end># Contracts: # [1] s - m0 * a = 0 (mod b) # [2] t - m1 * a = 0 (mod b) # [3] s * |m1| + t * |m0| <= b s=b<line_sep>t=a<line_sep>m0=0<line_sep>m1=1<while_stmt>t<block_start>u=s<floordiv>t<line_sep>s<augsub>t<times>u<line_sep>m0<augsub>m1<times>u# |m1 * u| <= |m1| * s <= b # [3]: # (s - t * u) * |m1| + t * |m0 - m1 * u| # <= s * |m1| - t * u * |m1| + t * (|m0| + |m1| * u) # = s * |m1| + t * |m0| <= b s,t=t s<line_sep>m0,m1=m1 m0<block_end># by [3]: |m0| <= b/g # by g != b: |m0| < b/g <if_stmt>m0<l>0<block_start>m0<augadd>b<floordiv>s<block_end><return>(s m0)<block_end><def_stmt>_primitive_root m:int<arrow>int<block_start><if_stmt>m<eq>2<block_start><return>1<block_end><if_stmt>m<eq>167772161<block_start><return>3<block_end><if_stmt>m<eq>469762049<block_start><return>3<block_end><if_stmt>m<eq>754974721<block_start><return>11<block_end><if_stmt>m<eq>998244353<block_start><return>3<block_end>divs=[2]+[0]<times>19<line_sep>cnt=1<line_sep>x=(m-1)<floordiv>2<while_stmt>x%2<eq>0<block_start>x<augfloordiv>2<block_end>i=3<while_stmt>i<times>i<le>x<block_start><if_stmt>x%i<eq>0<block_start>divs[cnt]=i<line_sep>cnt<augadd>1<while_stmt>x%i<eq>0<block_start>x<augfloordiv>i<block_end><block_end>i<augadd>2<block_end><if_stmt>x<g>1<block_start>divs[cnt]=x<line_sep>cnt<augadd>1<block_end>g=2<while_stmt><true><block_start><for_stmt>i range(cnt)<block_start><if_stmt>pow(g (m-1)<floordiv>divs[i] m)<eq>1<block_start><break><block_end><block_end><else_stmt><block_start><return>g<block_end>g<augadd>1<block_end><block_end>
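A few spot checks of the number-theory helpers above; the expected values follow directly from the definitions (deterministic Miller–Rabin with bases 2, 7, 61; extended gcd returning (g, x) with x * a ≡ g (mod b); hard-coded primitive roots for the common NTT moduli):

print(_is_prime(998244353))        # True  -- well-known NTT prime
print(_is_prime(561))              # False -- Carmichael number, rejected by the chosen bases
print(_inv_gcd(3, 10))             # (1, 7): gcd(3, 10) == 1 and 3 * 7 % 10 == 1
print(_primitive_root(998244353))  # 3 (from the hard-coded table)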
# coding=utf-8 # Copyright 2020 The Real-World RL Suite Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Runs a random policy on realworldrl."""<import_stmt>os<import_from_stmt>absl app<import_from_stmt>absl flags<import_stmt>numpy<as>np<import_stmt>realworldrl_suite.environments<as>rwrl<line_sep>flags.DEFINE_string('domain_name' 'cartpole' 'domain to solve')<line_sep>flags.DEFINE_string('task_name' 'realworld_balance' 'task to solve')<line_sep>flags.DEFINE_string('save_path' '/tmp/rwrl' 'where to save results')<line_sep>flags.DEFINE_integer('total_episodes' 100 'number of episodes')<line_sep>FLAGS=flags.FLAGS<def_stmt>random_policy action_spec<block_start><def_stmt>_act timestep<block_start><del_stmt>timestep<line_sep><return>np.random.uniform(low=action_spec.minimum high=action_spec.maximum size=action_spec.shape)<block_end><return>_act<block_end><def_stmt>run <block_start>"""Runs a random agent on a given environment."""<line_sep>env=rwrl.load(domain_name=FLAGS.domain_name task_name=FLAGS.task_name safety_spec=dict(enable=<true>) delay_spec=dict(enable=<true> actions=20) log_output=os.path.join(FLAGS.save_path 'log.npz') environment_kwargs=dict(log_safety_vars=<true> log_every=20 flat_observation=<true>))<line_sep>policy=random_policy(action_spec=env.action_spec())<line_sep>rewards=[]<for_stmt>_ range(FLAGS.total_episodes)<block_start>timestep=env.reset()<line_sep>total_reward=0.<while_stmt><not>timestep.last()<block_start>action=policy(timestep)<line_sep>timestep=env.step(action)<line_sep>total_reward<augadd>timestep.reward<block_end>rewards.append(total_reward)<block_end>print('Random policy total reward per episode: {:.2f} +- {:.2f}'.format(np.mean(rewards) np.std(rewards)))<block_end><def_stmt>main argv<block_start><del_stmt>argv# Unused. run()<block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end>
""" Reorganization of Array Elements Routines =========================== """<import_stmt>warnings<import_stmt>numpy_force<as>numpy<import_from_stmt>. bhary<import_from_stmt>bohrium_api _info<import_from_stmt>._util is_scalar<import_from_stmt>.bhary fix_biclass_wrapper<import_from_stmt>. array_create<import_from_stmt>. array_manipulation<import_from_stmt>. ufuncs<import_from_stmt>. numpy_backport<line_sep>@fix_biclass_wrapper<def_stmt>gather ary indexes<block_start>""" gather(ary, indexes) Gather elements from 'ary' selected by 'indexes'. The values of 'indexes' are absolute indexed into a flatten 'ary' The shape of the returned array equals indexes.shape. Parameters ---------- ary : array_like The array to gather elements from. indexes : array_like, interpreted as integers Array or list of indexes that will be gather from 'array' Returns ------- r : ndarray The gathered array freshly-allocated. """<import_from_stmt>. _bh<line_sep>ary=array_manipulation.flatten(array_create.array(ary))<line_sep># Convert a scalar index to a 1-element array <if_stmt>is_scalar(indexes)<block_start>indexes=[indexes]<block_end>indexes=array_create.array(indexes dtype=numpy.uint64 bohrium=<true>)<line_sep>ret=array_create.empty(indexes.shape dtype=ary.dtype bohrium=<true>)<if_stmt>ary.size<eq>0<or>indexes.size<eq>0<block_start><return>array_create.array([])<block_end>_bh.ufunc(_info.op['gather']['id'] (ret ary indexes))<line_sep><return>ret<block_end>@fix_biclass_wrapper<def_stmt>take a indices axis=<none> out=<none> mode='raise'<block_start>""" Take elements from an array along an axis. This function does the same thing as "fancy" indexing (indexing arrays using arrays); however, it can be easier to use if you need elements along a given axis. Parameters ---------- a : array_like The source array. indices : array_like, interpreted as integers The indices of the values to extract. .. versionadded:: 1.8.0 Also allow scalars for indices. axis : int, optional The axis over which to select values. By default, the flattened input array is used. out : ndarray, optional If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. Returns ------- subarray : ndarray The returned array has the same type as `a`. See Also -------- compress : Take elements using a boolean mask ndarray.take : equivalent method Examples -------- >>> a = [4, 3, 5, 7, 6, 8] >>> indices = [0, 1, 4] >>> np.take(a, indices) array([4, 3, 6]) In this example if `a` is an ndarray, "fancy" indexing can be used. >>> a = np.array(a) >>> a[indices] array([4, 3, 6]) If `indices` is not one dimensional, the output also has these dimensions. 
>>> np.take(a, [[0, 1], [2, 3]]) array([[4, 3], [5, 7]]) """<if_stmt><not>bhary.check(a)<block_start>indices=array_create.array(indices bohrium=<false>)<line_sep><return>numpy.take(a indices axis=axis out=out mode=mode)<block_end><if_stmt>mode<ne>"raise"<block_start>warnings.warn("Bohrium only supports the 'raise' mode not '%s', "<concat>"it will be handled by the original NumPy."%mode UserWarning 2)<line_sep>a=array_create.array(a bohrium=<false>)<line_sep>indices=array_create.array(indices bohrium=<false>)<line_sep><return>numpy.take(a indices axis=axis out=out mode=mode)<block_end><if_stmt>axis<is><not><none><and>a.ndim<g>1<block_start>warnings.warn("Bohrium does not support the 'axis' argument, "<concat>"it will be handled by the original NumPy." UserWarning 2)<line_sep>a=array_create.array(a bohrium=<false>)<line_sep>indices=array_create.array(indices bohrium=<false>)<line_sep><return>numpy.take(a indices axis=axis out=out mode=mode)<block_end>ret=gather(a indices)<if_stmt>out<is><not><none><block_start>out[<ellipsis>]=ret<line_sep><return>out<block_end><else_stmt><block_start><return>ret<block_end><block_end>@fix_biclass_wrapper<def_stmt>take_using_index_tuple a index_tuple out=<none><block_start>""" Take elements from the array 'a' specified by 'index_tuple' This function is very similar to take(), but takes a tuple of index arrays rather than a single index array Parameters ---------- a : array_like The source array. index_tuple : tuple of array_like, interpreted as integers Each array in the tuple specified the indices of the values to extract for that axis. The number of arrays in 'index_tuple' must equal the number of dimension in 'a' out : ndarray, optional If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. Returns ------- subarray : ndarray The returned array has the same type as `a`. """<if_stmt><not>bhary.check(a)<block_start>ret=a[index_tuple]<if_stmt>out<is><not><none><block_start>out[<ellipsis>]=ret<line_sep><return>out<block_end><else_stmt><block_start><return>ret<block_end><block_end><assert_stmt>len(index_tuple)<eq>a.ndim<if_stmt>a.size<eq>0<block_start><return>array_create.array([] dtype=a.dtype)<block_end><if_stmt>a.ndim<eq>1<block_start><return>take(a index_tuple[0] out=out)<block_end># Make sure that all index arrays are uint64 bohrium arrays index_list=[]<for_stmt>index index_tuple<block_start>index_list.append(array_create.array(index dtype=numpy.uint64 bohrium=<true>))<if_stmt>index_list[-1].size<eq>0<block_start><return>array_create.empty((0 ) dtype=a.dtype)<block_end><block_end># And then broadcast them into the same shape index_list=array_manipulation.broadcast_arrays(*index_list)[0]<line_sep># Let's find the absolute index abs_index=index_list[-1].copy()<line_sep>stride=a.shape[-1]<for_stmt>i range(len(index_list)-2 -1 -1)# Iterate backwards from index_list[-2] <block_start>abs_index<augadd>index_list[i]<times>stride<line_sep>stride<augmul>a.shape[i]<block_end># take() support absolute indices ret=take(a abs_index).reshape(index_list[0].shape)<if_stmt>out<is><not><none><block_start>out[<ellipsis>]=ret<line_sep><return>out<block_end><else_stmt><block_start><return>ret<block_end><block_end>@fix_biclass_wrapper<def_stmt>scatter ary indexes values<block_start>""" scatter(ary, indexes, values) Scatter 'values' into 'ary' selected by 'indexes'. The values of 'indexes' are absolute indexed into a flatten 'ary' The shape of 'indexes' and 'value' must be equal. 
Parameters ---------- ary : array_like The target array to write the values to. indexes : array_like, interpreted as integers Array or list of indexes that will be written to in 'ary' values : array_like Values to write into 'ary" """<import_from_stmt>. _bh<line_sep>indexes=array_manipulation.flatten(array_create.array(indexes dtype=numpy.uint64) always_copy=<false>)<line_sep>values=array_manipulation.flatten(array_create.array(values dtype=ary.dtype) always_copy=<false>)<assert_stmt>indexes.shape<eq>values.shape<if_stmt>ary.size<eq>0<or>indexes.size<eq>0<block_start><return><block_end># In order to ensure a contiguous array, we do the scatter on a flatten copy flat=array_manipulation.flatten(ary always_copy=<true>)<line_sep>_bh.ufunc(_info.op['scatter']['id'] (flat values indexes))<line_sep>ary[<ellipsis>]=flat.reshape(ary.shape)<block_end>@fix_biclass_wrapper<def_stmt>put a ind v mode='raise'<block_start>""" Replaces specified elements of an array with given values. The indexing works on the flattened target array. `put` is roughly equivalent to: :: a.flat[ind] = v Parameters ---------- a : ndarray Target array. ind : array_like Target indices, interpreted as integers. v : array_like Values to place in `a` at target indices. If `v` is shorter than `ind` it will be repeated as necessary. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. See Also -------- putmask, place, take Examples -------- >>> a = np.arange(5) >>> np.put(a, [0, 2], [-44, -55]) >>> a array([-44, 1, -55, 3, 4]) >>> a = np.arange(5) >>> np.put(a, 22, -5, mode='clip') >>> a array([ 0, 1, 2, 3, -5]) """<if_stmt>ind.size<eq>0<block_start><return><block_end># Nothing to insert! <if_stmt><not>bhary.check(a)<block_start><return>numpy.put(a ind.astype(numpy.int64) v mode=mode)<block_end><if_stmt>mode<ne>"raise"<block_start>warnings.warn("Bohrium only supports the 'raise' mode not '%s', "<concat>"it will be handled by the original NumPy."%mode UserWarning 2)<line_sep><return>numpy.put(a ind v mode=mode)<block_end>indexes=array_manipulation.flatten(array_create.array(ind dtype=numpy.uint64) always_copy=<false>)<line_sep>values=array_manipulation.flatten(array_create.array(v dtype=a.dtype) always_copy=<false>)<line_sep># Now let's make the shape of 'indexes' and 'values' match <if_stmt>indexes.size<g>values.size<block_start><if_stmt>values.size<eq>1# When 'values' is a scalar, we can broadcast it to match 'indexes' <block_start>values=numpy_backport.as_strided(values shape=indexes.shape strides=(0 ))<block_end><else_stmt># else we repeat 'values' enough times to be larger than 'indexes' <block_start>values=numpy_backport.as_strided(values shape=(indexes.size<floordiv>values.size+2 values.size) strides=(0 values.itemsize))<line_sep>values=array_manipulation.flatten(values always_copy=<false>)<block_end><block_end># When 'values' is too large, we simple cut the end off <if_stmt>values.size<g>indexes.size<block_start>values=values[0:indexes.size]<block_end># Now that 'indexes' and 'values' have the same shape, we can call 'scatter' scatter(a indexes values)<block_end>@fix_biclass_wrapper<def_stmt>put_using_index_tuple a index_tuple v<block_start>""" Replaces specified elements of an array with given values. 
This function is very similar to put(), but takes a tuple of index arrays rather than a single index array. The indexing works like fancy indexing: :: a[index_tuple] = v Parameters ---------- a : array_like The source array. index_tuple : tuple of array_like, interpreted as integers Each array in the tuple specified the indices of the values to extract for that axis. The number of arrays in 'index_tuple' must equal the number of dimension in 'a' v : array_like Values to place in `a`. Returns ------- subarray : ndarray The returned array has the same type as `a`. """<if_stmt><not>bhary.check(a)<block_start>a[index_tuple]=array_create.array(v bohrium=<false>)<line_sep><return><block_end>v=array_create.array(v bohrium=<true>)<assert_stmt>len(index_tuple)<eq>a.ndim<if_stmt>a.size<eq>0<block_start><return><block_end><if_stmt>a.ndim<eq>1<block_start><return>put(a index_tuple[0] v)<block_end># Make sure that all index arrays are uint64 bohrium arrays index_list=[]<for_stmt>index index_tuple<block_start>index_list.append(array_create.array(index dtype=numpy.uint64 bohrium=<true>))<if_stmt>index_list[-1].size<eq>0<block_start><return>array_create.empty((0 ) dtype=a.dtype)<block_end><block_end># And then broadcast them into the same shape index_list=array_manipulation.broadcast_arrays(*index_list)[0]<line_sep># Let's find the absolute index abs_index=index_list[-1].copy()<line_sep>stride=a.shape[-1]<for_stmt>i range(len(index_list)-2 -1 -1)# Iterate backwards from index_list[-2] <block_start>abs_index<augadd>index_list[i]<times>stride<line_sep>stride<augmul>a.shape[i]<block_end># put() support absolute indices put(a abs_index v)<block_end>@fix_biclass_wrapper<def_stmt>cond_scatter ary indexes values mask<block_start>""" scatter(ary, indexes, values, mask) Scatter 'values' into 'ary' selected by 'indexes' where 'mask' is true. The values of 'indexes' are absolute indexed into a flatten 'ary' The shape of 'indexes', 'value', and 'mask' must be equal. Parameters ---------- ary : array_like The target array to write the values to. indexes : array_like, interpreted as integers Array or list of indexes that will be written to in 'ary' values : array_like Values to write into 'ary' mask : array_like, interpreted as booleans A mask that specifies which indexes and values to include and exclude """<import_from_stmt>. _bh<line_sep>indexes=array_manipulation.flatten(array_create.array(indexes dtype=numpy.uint64) always_copy=<false>)<line_sep>values=array_manipulation.flatten(array_create.array(values dtype=ary.dtype) always_copy=<false>)<line_sep>mask=array_manipulation.flatten(array_create.array(mask dtype=numpy.bool) always_copy=<false>)<assert_stmt>(indexes.shape<eq>values.shape<and>values.shape<eq>mask.shape)<if_stmt>ary.size<eq>0<or>indexes.size<eq>0<block_start><return><block_end># In order to ensure a contiguous array, we do the scatter on a flatten copy flat=array_manipulation.flatten(ary always_copy=<true>)<line_sep>_bh.ufunc(_info.op['cond_scatter']['id'] (flat values indexes mask))<line_sep>ary[<ellipsis>]=flat.reshape(ary.shape)<block_end>@fix_biclass_wrapper<def_stmt>pack ary mask<block_start>""" pack(ary, mask) Packing the elements of 'ary' specified by 'mask' into new array that are contiguous The values of 'indexes' are absolute indexed into a flatten 'ary' The shape of 'mask' and 'ary' must be equal. Parameters ---------- ary : array_like, read flatten The array to read from. 
mask : array_like, interpreted as a flatten boolean array A mask that specifies which indexes of 'ary' to read """<line_sep>ary=array_manipulation.flatten(array_create.array(ary) always_copy=<false>)<line_sep>mask=array_manipulation.flatten(array_create.array(mask dtype=numpy.bool) always_copy=<false>)<assert_stmt>(ary.shape<eq>mask.shape)<if_stmt>ary.size<eq>0<or>mask.size<eq>0<block_start><return><block_end>true_indexes=ufuncs.add.accumulate(mask)<line_sep>true_count=int(true_indexes[-1])<if_stmt>true_count<eq>0<block_start><return>array_create.empty((0 ) dtype=ary.dtype)<block_end><else_stmt><block_start>ret=array_create.empty((true_count ) dtype=ary.dtype)<line_sep>cond_scatter(ret true_indexes-1 ary mask)<line_sep><return>ret<block_end><block_end>@fix_biclass_wrapper<def_stmt>flatnonzero a<block_start>""" Return indices that are non-zero in the flattened version of a. This is equivalent to a.ravel().nonzero()[0]. Parameters ---------- a : ndarray Input array. Returns ------- res : ndarray Output array, containing the indices of the elements of `a.ravel()` that are non-zero. See Also -------- nonzero : Return the indices of the non-zero elements of the input array. ravel : Return a 1-D array containing the elements of the input array. Examples -------- >>> x = np.arange(-2, 3) >>> x array([-2, -1, 0, 1, 2]) >>> np.flatnonzero(x) array([0, 1, 3, 4]) Use the indices of the non-zero elements as an index array to extract these elements: >>> x.ravel()[np.flatnonzero(x)] array([-2, -1, 1, 2]) """<if_stmt>a.dtype<is><not>numpy.bool<block_start>a=a<ne>0<block_end>new_indexes=array_create.arange(a.size dtype=numpy.uint64)<line_sep><return>pack(new_indexes a)<block_end>@fix_biclass_wrapper<def_stmt>nonzero a<block_start>""" Return the indices of the elements that are non-zero. Returns a tuple of arrays, one for each dimension of `a`, containing the indices of the non-zero elements in that dimension. The values in `a` are always tested and returned in row-major, C-style order. The corresponding non-zero values can be obtained with:: a[nonzero(a)] To group the indices by element, rather than dimension, use:: transpose(nonzero(a)) The result of this is always a 2-D array, with a row for each non-zero element. Parameters ---------- a : array_like Input array. Returns ------- tuple_of_arrays : tuple Indices of elements that are non-zero. See Also -------- flatnonzero : Return indices that are non-zero in the flattened version of the input array. ndarray.nonzero : Equivalent ndarray method. count_nonzero : Counts the number of non-zero elements in the input array. Examples -------- >>> x = np.eye(3) >>> x array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> np.nonzero(x) (array([0, 1, 2]), array([0, 1, 2])) >>> x[np.nonzero(x)] array([ 1., 1., 1.]) >>> np.transpose(np.nonzero(x)) array([[0, 0], [1, 1], [2, 2]]) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. Given an array `a`, the condition `a` > 3 is a boolean array and since False is interpreted as 0, np.nonzero(a > 3) yields the indices of the `a` where the condition is true. >>> a = np.array([[1,2,3],[4,5,6],[7,8,9]]) >>> a > 3 array([[False, False, False], [ True, True, True], [ True, True, True]], dtype=bool) >>> np.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) The ``nonzero`` method of the boolean array can also be called. 
>>> (a > 3).nonzero() (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) """<if_stmt>a.ndim<eq>1<block_start><return>(flatnonzero(a) )<block_end><if_stmt><not>a.flags['C_CONTIGUOUS']<block_start>a=a.copy(order='C')<block_end>nz=flatnonzero(a)<line_sep>ret=[]<for_stmt>stride_in_bytes a.strides<block_start>stride=stride_in_bytes<floordiv>a.itemsize<assert_stmt>stride_in_bytes%a.itemsize<eq>0<line_sep>tmp=nz<floordiv>stride<line_sep>ret.append(tmp)<line_sep>nz<augsub>tmp<times>stride<block_end><return>tuple(ret)<block_end>
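For readers without a Bohrium install, the documented semantics of gather/scatter above reduce to fancy indexing into the flattened array; the following NumPy-only sketch mirrors that behaviour (it is not the Bohrium code path itself):

import numpy as np

a = np.arange(12).reshape(3, 4)
idx = np.array([0, 5, 11])

gathered = a.ravel()[idx]          # what gather(a, idx) returns: array([ 0,  5, 11])

b = a.copy()
b.ravel()[idx] = [-1, -2, -3]      # what scatter(b, idx, values) writes in place
print(gathered, b[0, 0], b[1, 1], b[2, 3])   # [ 0  5 11] -1 -2 -3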
""" Copyright (c) 2021, NVIDIA CORPORATION. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>sys os<line_sep>sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)) "../../unit_test/test_scripts/tf2/")))<import_from_stmt>utils *<line_sep>sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)) "DenseDemo/")))<import_from_stmt>models SOKDenseDemo<def_stmt>TFDataset filename batchsize as_sparse_tensor repeat=1<block_start>samples,labels=restore_from_file(filename)<line_sep>dataset=tf_dataset(keys=samples labels=labels batchsize=batchsize to_sparse_tensor=as_sparse_tensor repeat=repeat)<del_stmt>samples<del_stmt>labels<line_sep><return>dataset<block_end><def_stmt>get_dataset global_batch_size read_batchsize iter_num=10 vocabulary_size=1024 slot_num=10 max_nnz=5 use_sparse_mask=<false> repeat=1<block_start>random_samples,ramdom_labels=generate_random_samples(num_of_samples=global_batch_size<times>iter_num vocabulary_size=vocabulary_size slot_num=slot_num max_nnz=max_nnz use_sparse_mask=use_sparse_mask)<line_sep>dataset=tf_dataset(keys=random_samples labels=ramdom_labels batchsize=read_batchsize to_sparse_tensor=use_sparse_mask repeat=repeat)<line_sep><return>dataset<block_end>
<def_stmt>Fun <block_start><pass><block_end><class_stmt>A<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>Fun self<block_start><pass><block_end><block_end><try_stmt><block_start>print(Fun.__name__)<line_sep>print(A.__init__.__name__)<line_sep>print(A.Fun.__name__)<line_sep>print(A().Fun.__name__)<block_end><except_stmt>AttributeError<block_start>print('SKIP')<block_end>
<import_from_future_stmt> print_function division<import_from_stmt>time sleep<import_stmt>pymongo<import_from_stmt>monary Monary<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>neuralnilm.consts DATA_FOLD_NAMES<import_from_stmt>neuralnilm.utils get_colors<import_from_stmt>neuralnilm.config config<class_stmt>Monitor(object)<block_start><def_stmt>__init__ self experiment_id output_path='.' update_period=1 max_num_lines=1000 mongo_db='neuralnilm' mongo_host=<none><block_start>""" Parameters ---------- max_num_lines : int Number of pixels. """<line_sep>self.experiment_id=experiment_id<line_sep>self.output_path=output_path<line_sep>self.update_period=update_period<line_sep>self.max_num_lines=max_num_lines<line_sep>self._last_iteration_processed={'train':0 'validation':0}<if_stmt>mongo_host<is><none><block_start>self.mongo_host=config.get("MongoDB" "address")<block_end><else_stmt><block_start>self.mongo_host=mongo_host<block_end>self.mongo_client=pymongo.MongoClient(self.mongo_host)<line_sep>self.db=self.mongo_client[mongo_db]<line_sep>self.mongo_db=mongo_db<line_sep>self._validation_metric_names=<none><block_end><def_stmt>start self<block_start><while_stmt><true><block_start><if_stmt>self._new_scores_available('train')<block_start>self._plot_train_scores()<block_end><if_stmt>self._new_scores_available('validation')<block_start>self._plot_validation_scores()<block_end>sleep(self.update_period)<block_end><block_end><def_stmt>_new_costs_available self train_or_validation<block_start>"""Returns True if new training costs are available from DB. Parameters ---------- train_or_validation : str, {'train', 'validation'} """<line_sep>collection=self.db[train_or_validation+'_scores']<line_sep>document=collection.find_one(filter={'experiment_id':self.experiment_id 'iteration':{'$gt':self._last_iteration_processed[train_or_validation]}})<line_sep><return>bool(document)<block_end><def_stmt>_get_validation_mse self<block_start>monary=Monary(host=self.mongo_host)<def_stmt>get_mse_for_fold fold<block_start>iterations,loss,source_id=monary.query(db=self.mongo_db coll='validation_scores' query={'experiment_id':self.experiment_id 'fold':fold} fields=['iteration' 'scores.regression.mean_squared_error' 'source_id'] types=['int32' 'float32' 'int8'])<line_sep>scores_df=pd.DataFrame({'loss':loss 'source_id':source_id} index=iterations)<line_sep>scores_df=scores_df.sort_index()<line_sep><return>scores_df<block_end>FOLDS=['unseen_appliances' 'unseen_activations_of_seen_appliances']<line_sep>scores={}<for_stmt>fold FOLDS<block_start>scores[fold]=get_mse_for_fold(fold)<block_end><return>scores<block_end><def_stmt>_get_train_costs self# Get train scores <block_start>monary=Monary(host=self.mongo_host)<line_sep>iterations,loss,source_id=monary.query(db=self.mongo_db coll='train_scores' query={'experiment_id':self.experiment_id} fields=['iteration' 'loss' 'source_id'] types=['int32' 'float32' 'int8'])<line_sep>scores_df=pd.DataFrame({'loss':loss 'source_id':source_id} index=iterations)<line_sep>scores_df=scores_df.sort_index()<line_sep><return>scores_df<block_end><def_stmt>_plot_train_scores self<block_start>train_scores_df=self._get_train_costs()<line_sep>all_scores=self._get_validation_mse()<line_sep>all_scores.update({'train':train_scores_df})<line_sep>fig,ax=plt.subplots(1)<line_sep>source_names=self.source_names<for_stmt>fold,scores_df all_scores.iteritems()<block_start>sources=scores_df['source_id'].unique()<for_stmt>source_i sources# Get losses for just this source 
<block_start>mask=scores_df['source_id']<eq>source_i<line_sep>loss=scores_df[mask]['loss']<line_sep># Downsample if necessary loss_for_source=self._downsample(loss)<line_sep># Plot ax.plot(loss_for_source.index loss_for_source.values label='{} : {}'.format(fold source_names[source_i]))<block_end><block_end>ax.legend()<line_sep>plt.title('Training costs')<line_sep>ax.set_xlabel('Iteration')<line_sep>ax.set_ylabel('Mean squared error')<line_sep>plt.show()<try_stmt><block_start>self._last_iteration_processed['train']=train_scores_df.index[-1]<block_end><except_stmt>IndexError# No data loaded <block_start><pass><block_end><block_end>@property<def_stmt>validation_metric_names self<block_start>""" Returns ------- metric_names : list e.g. ['regression.mean_squared_error', 'classification_2_state.f1_score'] """<if_stmt>self._validation_metric_names<is><none><block_start>scores=self.db.validation_scores.find_one(filter={'experiment_id':self.experiment_id})['scores']<line_sep>self._validation_metric_names=[]<for_stmt>metric_type,metrics scores.iteritems()<block_start><for_stmt>metric_name metrics<block_start>self._validation_metric_names.append(metric_type+'.'+metric_name)<block_end><block_end><block_end><return>self._validation_metric_names<block_end>@property<def_stmt>source_names self<block_start>""" Returns ------- source_names : dict """<line_sep>metadata=self.db.experiments.find_one({'_id':self.experiment_id})<line_sep>sources=metadata['data']['pipeline']['sources']<line_sep>source_names={int(i):sources[i]['name']<for>i sources}<line_sep><return>source_names<block_end><def_stmt>_plot_validation_scores self<block_start>validation_sources=self.db.validation_scores.distinct(key='source_id' filter={'experiment_id':self.experiment_id})<line_sep>validation_sources.sort()<line_sep>num_cols=len(validation_sources)<line_sep>fig,axes=plt.subplots(nrows=3 ncols=num_cols sharex="col" sharey=<true> squeeze=<false>)<line_sep>fig.patch.set_facecolor('white')<line_sep>source_names=self.source_names<for_stmt>col,source_id enumerate(validation_sources)<block_start><for_stmt>row,fold enumerate(DATA_FOLD_NAMES)<block_start>ax=axes[row col]<line_sep>self._plot_validation_scores_for_source_and_fold(ax=ax source_id=source_id fold=fold show_axes_labels=(row<eq>0) show_scales=(col<eq>num_cols-1))<if_stmt>row<eq>0<block_start>ax.set_title(source_names[source_id] position=(.5 1.05))<block_end><elif_stmt>row<eq>2<block_start>ax.set_xlabel('Iteration' labelpad=10)<block_end><if_stmt>col<eq>0<block_start>ax.set_ylabel(fold.replace("_" " ").title() labelpad=10)<block_end>ax.patch.set_facecolor((0.95 0.95 0.95))<block_end><block_end>plt.subplots_adjust(top=0.91 bottom=0.05 left=0.03 right=0.7 hspace=0.15 wspace=0.1)<line_sep>plt.show()<block_end><def_stmt>_plot_validation_scores_for_source_and_fold self ax source_id fold show_axes_labels show_scales<block_start>fields=['iteration']+['scores.'+metric_name<for>metric_name self.validation_metric_names]<line_sep>monary=Monary(host=self.mongo_host)<line_sep>result=monary.query(db=self.mongo_db coll='validation_scores' query={'experiment_id':self.experiment_id 'source_id':source_id 'fold':fold} fields=fields types=['int32']+['float32']<times>len(self.validation_metric_names))<line_sep>index=result[0]<line_sep>data={metric_name:result[i+1]<for>i,metric_name enumerate(self.validation_metric_names)}<line_sep>df=pd.DataFrame(data index=index)<line_sep>df=df.sort_index()<line_sep>df=self._downsample(df)<line_sep># Create multiple independent axes. 
Adapted from <NAME>'s answer: # http://stackoverflow.com/a/7734614 # Colours n=len(self.validation_metric_names)<line_sep>colors=get_colors(n)<line_sep># Twin the x-axis to make independent y-axes. axes=[ax]<for_stmt>metric_name self.validation_metric_names[1:]<block_start>axes.append(ax.twinx())<block_end>SEP=0.2<if_stmt>show_scales<block_start><for_stmt>i,axis enumerate(axes)<block_start>axis.yaxis.tick_right()<if_stmt>i<ne>0# To make the border of the right-most axis visible, # we need to turn the frame on. This hides the other plots, # however, so we need to turn its fill off. <block_start>axis.set_frame_on(<true>)<line_sep>axis.patch.set_visible(<false>)<line_sep># Move the last y-axes spines over to the right. axis.spines['right'].set_position(('axes' 1+(SEP<times>i)))<block_end><block_end><block_end><else_stmt><block_start><for_stmt>axis axes<block_start>axis.tick_params(labelright=<false> labelleft=<false>)<line_sep>axis.yaxis.set_ticks_position('none')<line_sep>axis.spines['right'].set_visible(<false>)<block_end><block_end><for_stmt>axis axes<block_start><for_stmt>spine ['top' 'left' 'bottom']<block_start>axis.spines[spine].set_visible(<false>)<block_end>axis.xaxis.set_ticks_position('none')<block_end>lines=[]<for_stmt>i,(axis metric_name color) enumerate(zip(axes self.validation_metric_names colors))<block_start>axis.tick_params(axis='y' colors=color direction='out')<line_sep>label=metric_name.replace("regression." "")<line_sep>label=label.replace("classification_" "")<line_sep>label=label.replace("_" " ")<line_sep>label=label.replace("." " ")<line_sep>label=label.replace(" " "\n")<line_sep>line,=axis.plot(df.index df[metric_name].values color=color label=label)<if_stmt>show_axes_labels<and>show_scales<block_start>axis.set_ylabel(label color=color rotation=0 fontsize=8 va='bottom')<if_stmt>i<eq>0<block_start>coords=(1.05 1.1)<block_end><else_stmt><block_start>coords=(1.05+(SEP<times>i) 1.1)<block_end>axis.yaxis.set_label_coords(*coords)<block_end>lines.append(line)<block_end>self._last_iteration_processed['validation']=index[-1]<line_sep><return>lines<block_end><def_stmt>_downsample self data<block_start>"""Downsample `data` if necessary."""<if_stmt>len(data)<g>self.max_num_lines<block_start>divisor=int(np.ceil(len(data)/self.max_num_lines))<line_sep>data=data.groupby(<lambda>x:x<floordiv>divisor).mean()<line_sep>data.index<augmul>divisor<block_end><return>data<block_end><block_end>
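A minimal launch sketch for the Monitor class above, assuming a reachable MongoDB instance already populated by a training run; the experiment id is a placeholder:

monitor = Monitor(experiment_id="exp_0001", update_period=30)
monitor.start()   # blocks, re-plotting train and validation scores as new documents appear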
<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_from_stmt>typing List Text Tuple<def_stmt>line2matrix line:Text n:int m:int<arrow>Tuple[np.ndarray np.ndarray]<block_start>''' converts alignemnt given in the format "0-1 3p4 5-6" to alignment matrices n, m: maximum length of the involved sentences (i.e., dimensions of the alignemnt matrices) '''<def_stmt>convert i j<block_start>i,j=int(i) int(j)<if_stmt>i<ge>n<or>j<ge>m<block_start><raise>ValueError("Error in Gold Standard?")<block_end><return>i j<block_end>possibles=np.zeros((n m))<line_sep>sures=np.zeros((n m))<for_stmt>elem line.split(" ")<block_start><if_stmt>"p"<in>elem<block_start>i,j=convert(*elem.split("p"))<line_sep>possibles[i j]=1<block_end><elif_stmt>"-"<in>elem<block_start>i,j=convert(*elem.split("-"))<line_sep>possibles[i j]=1<line_sep>sures[i j]=1<block_end><block_end><return>sures possibles<block_end><def_stmt>plot_alignments e:List[Text] f:List[Text] sures:np.ndarray possibles:np.ndarray alignment1:np.ndarray alignment2:np.ndarray=<none> title:Text=<none> filename:Text=<none><block_start>shorter=min(len(e) len(f))<line_sep>scalefactor=min((4/shorter) 1)<line_sep>groundtruth=0.75<times>sures+0.4<times>possibles<line_sep>fig,ax=plt.subplots()<line_sep>im=ax.imshow(groundtruth cmap="Greens" vmin=0 vmax=1.5)<line_sep># show all ticks... ax.set_xticks(np.arange(len(f)))<line_sep>ax.set_yticks(np.arange(len(e)))<line_sep># ... and label them ax.set_xticklabels(f fontsize=25<times>scalefactor)<line_sep>ax.set_yticklabels(e fontsize=25<times>scalefactor)<for_stmt>edge,spine ax.spines.items()<block_start>spine.set_visible(<false>)<block_end>ax.tick_params(top=<true> bottom=<false> labeltop=<true> labelbottom=<false>)<line_sep># Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels() rotation=30 ha="left" rotation_mode="default")<line_sep>plt.setp(ax.get_yticklabels() rotation=0 ha="right" rotation_mode="anchor")<line_sep>ax.set_xticks(np.arange(groundtruth.shape[1]+1)-.5 minor=<true>)<line_sep>ax.set_yticks(np.arange(groundtruth.shape[0]+1)-.5 minor=<true>)<line_sep># set grid ax.grid(which="minor" color="black" linestyle='-' linewidth=1)<line_sep>ax.tick_params(which="minor" bottom=<false> left=<false>)<line_sep># Loop over data dimensions and create text annotations. circle=dict(boxstyle="circle,pad=0.3" fc=(0 0 0 0.0) ec="black" lw=3)<line_sep>roundthing=dict(boxstyle="square,pad=0.3" fc="black" ec=(0 0 0 0.0) lw=2)<line_sep># plot alignments <for_stmt>i range(len(e))<block_start><for_stmt>j range(len(f))<block_start><if_stmt>alignment1[i j]<g>0<block_start>t=ax.text(j i "x" ha="center" va="center" size=25<times>scalefactor bbox=circle color=(0 0 0 0.0))<block_end><if_stmt>alignment2<is><not><none><and>alignment2[i j]<g>0<block_start>t=ax.text(j i "x" ha="center" va="center" size=12<times>scalefactor bbox=roundthing color=(0 0 0 0.0))<block_end><block_end><block_end><if_stmt>title<block_start>ax.set_title(title)<block_end>fig.tight_layout()<if_stmt>filename<block_start>plt.savefig(filename)<block_end><else_stmt><block_start>plt.show()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>line2matrix("0-0 1p1 2-1" 3 2)<line_sep>plot_alignments(["Testing" "this" "."] ["Hier" "wird" "getestet" "."] np.array([[0 0 1 0] [0 0 0 0] [0 0 0 1]]) np.array([[0 0 0 0] [1 0 0 0] [0 0 0 0]]) np.array([[0 1 0 0] [0 0 0 0] [0 1 0 0]]) np.array([[0 0 0 1] [0 0 0 0] [0 0 0 0]]) "Example")<block_end>
<import_from_stmt>pubnub.endpoints.objects_v2.objects_endpoint ObjectsEndpoint IncludeCustomEndpoint UuidEndpoint<import_from_stmt>pubnub.enums PNOperationType<import_from_stmt>pubnub.enums HttpMethod<import_from_stmt>pubnub.models.consumer.objects_v2.uuid PNGetUUIDMetadataResult<class_stmt>GetUuid(ObjectsEndpoint UuidEndpoint IncludeCustomEndpoint)<block_start>GET_UID_PATH="/v2/objects/%s/uuids/%s"<def_stmt>__init__ self pubnub<block_start>ObjectsEndpoint.__init__(self pubnub)<line_sep>UuidEndpoint.__init__(self)<line_sep>IncludeCustomEndpoint.__init__(self)<block_end><def_stmt>build_path self<block_start><return>GetUuid.GET_UID_PATH%(self.pubnub.config.subscribe_key self._effective_uuid())<block_end><def_stmt>validate_specific_params self<block_start>self._validate_uuid()<block_end><def_stmt>create_response self envelope<block_start><return>PNGetUUIDMetadataResult(envelope)<block_end><def_stmt>operation_type self<block_start><return>PNOperationType.PNGetUuidMetadataOperation<block_end><def_stmt>name self<block_start><return>"Get UUID"<block_end><def_stmt>http_method self<block_start><return>HttpMethod.GET<block_end><block_end>
<import_stmt>heterocl<as>hcl<import_stmt>heterocl.tvm<as>tvm<import_stmt>numpy<as>np<import_stmt>numpy.testing<as>tst<import_stmt>hlib<line_sep>dtype=hcl.Float(64)<line_sep>_sum=hcl.reducer(0 <lambda>x y:x+y dtype)<line_sep>_max=hcl.reducer(-100000 <lambda>x y:tvm.make.Max(x y) dtype)<line_sep>_min=hcl.reducer(100000 <lambda>x y:tvm.make.Min(x y) dtype)<line_sep>_prod=hcl.reducer(1 <lambda>x y:x<times>y dtype)<def_stmt>test_exp <block_start><def_stmt>_test in_shape<block_start>hcl.init(hcl.Float())<line_sep>data=hcl.placeholder(in_shape)<def_stmt>math_func data<block_start><return>hlib.op.math.exp(data)<block_end>s=hcl.create_schedule(data math_func)<line_sep>f=hcl.build(s)<line_sep>_in=10<times>np.random.random(in_shape)-5<line_sep>out=hcl.asarray(np.zeros(in_shape).astype('float32'))<line_sep>real_out=np.exp(_in)<line_sep>f(hcl.asarray(_in) out)<line_sep>tst.assert_almost_equal(out.asnumpy() real_out 4)<block_end>_test((1 3))<line_sep>_test((3 3 3))<line_sep>_test((5 5 3 2))<block_end><def_stmt>test_log <block_start><def_stmt>_test in_shape<block_start>hcl.init(hcl.Float())<line_sep>data=hcl.placeholder(in_shape)<def_stmt>math_func data<block_start><return>hlib.op.math.log(data)<block_end>s=hcl.create_schedule(data math_func)<line_sep>f=hcl.build(s)<line_sep>_in=10<times>np.random.random(in_shape)+1<line_sep>out=hcl.asarray(np.zeros(in_shape).astype('float32'))<line_sep>real_out=np.log(_in)<line_sep>f(hcl.asarray(_in) out)<line_sep>tst.assert_almost_equal(out.asnumpy() real_out 5)<block_end>_test((1 3))<line_sep>_test((3 3 3))<line_sep>_test((5 5 3 2))<block_end><def_stmt>test_sigmoid <block_start><def_stmt>_test in_shape<block_start>data=hcl.placeholder(in_shape)<def_stmt>math_func data<block_start><return>hlib.op.math.sigmoid(data)<block_end>s=hcl.create_schedule(data math_func)<line_sep>f=hcl.build(s)<line_sep>_in=10<times>np.random.random(in_shape)-5<line_sep>out=hcl.asarray(np.zeros(in_shape).astype('float32'))<def_stmt>sigmoid data<block_start><return>1/(1+np.exp(-data))<block_end>real_out=sigmoid(_in)<line_sep>f(hcl.asarray(_in) out)<line_sep>tst.assert_almost_equal(out.asnumpy() real_out 5)<block_end>_test((1 3))<line_sep>_test((3 3 3))<line_sep>_test((5 5 3 2))<block_end><def_stmt>test_sqrt <block_start><def_stmt>_test in_shape<block_start>data=hcl.placeholder(in_shape)<def_stmt>math_func data<block_start><return>hlib.op.math.sqrt(data)<block_end>s=hcl.create_schedule(data math_func)<line_sep>f=hcl.build(s)<line_sep>_in=100<times>np.random.random(in_shape)+1<line_sep>out=hcl.asarray(np.zeros(in_shape).astype('float32'))<line_sep>real_out=np.sqrt(_in)<line_sep>f(hcl.asarray(_in) out)<line_sep>tst.assert_almost_equal(out.asnumpy() real_out 5)<block_end>_test((1 3))<line_sep>_test((3 3 3))<line_sep>_test((5 5 3 2))<block_end><def_stmt>tanh_test <block_start><def_stmt>_test in_shape<block_start>hcl.init(hcl.Float())<line_sep>data=hcl.placeholder(in_shape)<def_stmt>math_func data<block_start><return>hlib.op.math.tanh(data)<block_end>s=hcl.create_schedule(data math_func)<line_sep>f=hcl.build(s)<line_sep>_in=100<times>np.random.random(in_shape)-50<line_sep>out=hcl.asarray(np.zeros(in_shape).astype('float32'))<line_sep>real_out=np.tanh(_in)<line_sep>f(hcl.asarray(_in) out)<line_sep>tst.assert_almost_equal(out.asnumpy() real_out 5)<block_end>_test((1 3))<line_sep>_test((3 3 3))<line_sep>_test((5 5 3 2))<block_end><def_stmt>test_clip <block_start><def_stmt>_test in_shape x_min x_max<block_start>hcl.init(hcl.Float())<line_sep>data=hcl.placeholder(in_shape)<def_stmt>math_func data 
x_min=x_min x_max=x_max<block_start><return>hlib.op.math.clip(data a_min=x_min a_max=x_max)<block_end>s=hcl.create_schedule(data math_func)<line_sep>f=hcl.build(s)<line_sep>_in=10<times>np.random.random(in_shape)-5<line_sep>out=hcl.asarray(np.zeros(in_shape).astype('float32'))<line_sep>real_out=np.clip(_in x_min x_max)<line_sep>f(hcl.asarray(_in) out)<line_sep>tst.assert_almost_equal(out.asnumpy() real_out)<block_end>_test((1 3) 0 4)<line_sep>_test((1 3 3) -4 4)<line_sep>_test((1 3) 0 4)<line_sep>_test((3 3) 0 0.01)<block_end><def_stmt>test_sum <block_start><def_stmt>_test in_shape axis=<none> keepdims=<false><block_start>hcl.init()<line_sep>new_shape=[]<if_stmt>axis<is><none><block_start><for_stmt>i range(len(in_shape))<block_start>new_shape.append(1)<block_end><block_end><else_stmt><block_start><if_stmt>isinstance(axis int)<block_start><if_stmt>axis<l>0<block_start>axis=len(in_shape)+axis<block_end>axis=[axis]<block_end><for_stmt>i range(len(in_shape))<block_start><if_stmt>i<in>axis<block_start>new_shape.append(1)<block_end><else_stmt><block_start>new_shape.append(in_shape[i])<block_end><block_end><block_end>data=hcl.placeholder(in_shape)<def_stmt>math_func data axis=axis keepdims=keepdims<block_start><return>hlib.op.math.sum(data axis keepdims)<block_end>s=hcl.create_schedule(data math_func)<line_sep>f=hcl.build(s)<line_sep>_in=np.random.randint(10 size=in_shape)<if_stmt>keepdims<block_start>out=hcl.asarray(np.zeros(new_shape))<block_end><else_stmt><block_start>out=hcl.asarray(np.squeeze(np.zeros(new_shape)))<block_end>f(hcl.asarray(_in) out)<line_sep>real_out=np.sum(_in axis=axis keepdims=keepdims)<line_sep>tst.assert_almost_equal(real_out out.asnumpy())<block_end>_test((3 3) axis=(0 ))<line_sep>_test((3 3) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(0 ))<line_sep>_test((2 2 2) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(1 ))<line_sep>_test((2 2 2) axis=(1 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(2 ))<line_sep>_test((2 2 2) axis=(2 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 ))<line_sep>_test((2 2 2 3) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(1 ))<line_sep>_test((2 2 2 3) axis=(1 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(2 ))<line_sep>_test((2 2 2 3) axis=(2 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(3 ))<line_sep>_test((2 2 2 3) axis=(3 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 1))<line_sep>_test((2 2 2 3) axis=(0 1) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 2))<line_sep>_test((2 2 2 3) axis=(0 2) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(3 ))<line_sep>_test((5 2 4 3) axis=(3 ) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(0 1))<line_sep>_test((5 2 4 3) axis=(0 1) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(0 2))<line_sep>_test((5 2 4 3) axis=(0 2) keepdims=<true>)<block_end><def_stmt>test_max <block_start><def_stmt>_test in_shape axis=<none> keepdims=<true><block_start>hcl.init()<line_sep>new_shape=[]<if_stmt>axis<is><none><block_start><for_stmt>i range(len(in_shape))<block_start>new_shape.append(1)<block_end><block_end><else_stmt><block_start><if_stmt>isinstance(axis int)<block_start><if_stmt>axis<l>0<block_start>axis=len(in_shape)+axis<block_end>axis=[axis]<block_end><for_stmt>i range(len(in_shape))<block_start><if_stmt>i<in>axis<block_start>new_shape.append(1)<block_end><else_stmt><block_start>new_shape.append(in_shape[i])<block_end><block_end><block_end>data=hcl.placeholder(in_shape)<def_stmt>math_func data axis=axis keepdims=keepdims<block_start><return>hlib.op.math.max(data axis 
keepdims)<block_end>s=hcl.create_schedule(data math_func)<line_sep>f=hcl.build(s)<line_sep>_in=np.random.randint(10 size=in_shape)<if_stmt>keepdims<block_start>out=hcl.asarray(np.zeros(new_shape))<block_end><else_stmt><block_start>out=hcl.asarray(np.squeeze(np.zeros(new_shape)))<block_end>real_out=np.amax(_in tuple(axis) keepdims=keepdims)<line_sep>f(hcl.asarray(_in) out)<line_sep>tst.assert_almost_equal(out.asnumpy() real_out)<block_end>_test((3 3) axis=(0 ))<line_sep>_test((3 3) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(0 ))<line_sep>_test((2 2 2) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(1 ))<line_sep>_test((2 2 2) axis=(1 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(2 ))<line_sep>_test((2 2 2) axis=(2 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 ))<line_sep>_test((2 2 2 3) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(1 ))<line_sep>_test((2 2 2 3) axis=(1 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(2 ))<line_sep>_test((2 2 2 3) axis=(2 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(3 ))<line_sep>_test((2 2 2 3) axis=(3 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 1))<line_sep>_test((2 2 2 3) axis=(0 1) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 2))<line_sep>_test((2 2 2 3) axis=(0 2) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(3 ))<line_sep>_test((5 2 4 3) axis=(3 ) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(0 1))<line_sep>_test((5 2 4 3) axis=(0 1) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(0 2))<line_sep>_test((5 2 4 3) axis=(0 2) keepdims=<true>)<block_end><def_stmt>test_prod <block_start><def_stmt>_test in_shape axis=<none> keepdims=<true><block_start>hcl.init(hcl.Float())<line_sep>new_shape=[]<if_stmt>axis<is><none><block_start><for_stmt>i range(len(in_shape))<block_start>new_shape.append(1)<block_end><block_end><else_stmt><block_start><if_stmt>isinstance(axis int)<block_start><if_stmt>axis<l>0<block_start>axis=len(in_shape)+axis<block_end>axis=[axis]<block_end><for_stmt>i range(len(in_shape))<block_start><if_stmt>i<in>axis<block_start>new_shape.append(1)<block_end><else_stmt><block_start>new_shape.append(in_shape[i])<block_end><block_end><block_end>data=hcl.placeholder(in_shape)<def_stmt>math_func data axis=axis keepdims=keepdims<block_start><return>hlib.op.math.prod(data axis keepdims)<block_end>s=hcl.create_schedule(data math_func)<line_sep>f=hcl.build(s)<line_sep>_in=np.random.random(size=in_shape)<if_stmt>keepdims<block_start>out=hcl.asarray(np.zeros(new_shape))<block_end><else_stmt><block_start>out=hcl.asarray(np.squeeze(np.zeros(new_shape)))<block_end>real_out=np.prod(_in tuple(axis) keepdims=keepdims)<line_sep>f(hcl.asarray(_in) out)<line_sep>tst.assert_almost_equal(out.asnumpy() real_out)<block_end>_test((3 3) axis=(0 ))<line_sep>_test((3 3) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(0 ))<line_sep>_test((2 2 2) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(1 ))<line_sep>_test((2 2 2) axis=(1 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(2 ))<line_sep>_test((2 2 2) axis=(2 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 ))<line_sep>_test((2 2 2 3) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(1 ))<line_sep>_test((2 2 2 3) axis=(1 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(2 ))<line_sep>_test((2 2 2 3) axis=(2 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(3 ))<line_sep>_test((2 2 2 3) axis=(3 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 1))<line_sep>_test((2 2 2 3) axis=(0 1) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 
2))<line_sep>_test((2 2 2 3) axis=(0 2) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(3 ))<line_sep>_test((5 2 4 3) axis=(3 ) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(0 1))<line_sep>_test((5 2 4 3) axis=(0 1) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(0 2))<line_sep>_test((5 2 4 3) axis=(0 2) keepdims=<true>)<block_end><def_stmt>test_min <block_start><def_stmt>_test in_shape axis=<none> keepdims=<true><block_start>hcl.init()<line_sep>new_shape=[]<if_stmt>axis<is><none><block_start><for_stmt>i range(len(in_shape))<block_start>new_shape.append(1)<block_end><block_end><else_stmt><block_start><if_stmt>isinstance(axis int)<block_start><if_stmt>axis<l>0<block_start>axis=len(in_shape)+axis<block_end>axis=[axis]<block_end><for_stmt>i range(len(in_shape))<block_start><if_stmt>i<in>axis<block_start>new_shape.append(1)<block_end><else_stmt><block_start>new_shape.append(in_shape[i])<block_end><block_end><block_end>data=hcl.placeholder(in_shape)<def_stmt>math_func data axis=axis keepdims=keepdims<block_start><return>hlib.op.math.min(data axis keepdims)<block_end>s=hcl.create_schedule(data math_func)<line_sep>f=hcl.build(s)<line_sep>_in=np.random.randint(10 size=in_shape)<if_stmt>keepdims<block_start>out=hcl.asarray(np.zeros(new_shape))<block_end><else_stmt><block_start>out=hcl.asarray(np.squeeze(np.zeros(new_shape)))<block_end>real_out=np.amin(_in tuple(axis) keepdims=keepdims)<line_sep>f(hcl.asarray(_in) out)<line_sep>tst.assert_almost_equal(out.asnumpy() real_out)<block_end>_test((3 3) axis=(0 ))<line_sep>_test((3 3) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(0 ))<line_sep>_test((2 2 2) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(1 ))<line_sep>_test((2 2 2) axis=(1 ) keepdims=<true>)<line_sep>_test((2 2 2) axis=(2 ))<line_sep>_test((2 2 2) axis=(2 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 ))<line_sep>_test((2 2 2 3) axis=(0 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(1 ))<line_sep>_test((2 2 2 3) axis=(1 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(2 ))<line_sep>_test((2 2 2 3) axis=(2 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(3 ))<line_sep>_test((2 2 2 3) axis=(3 ) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 1))<line_sep>_test((2 2 2 3) axis=(0 1) keepdims=<true>)<line_sep>_test((2 2 2 3) axis=(0 2))<line_sep>_test((2 2 2 3) axis=(0 2) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(3 ))<line_sep>_test((5 2 4 3) axis=(3 ) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(0 1))<line_sep>_test((5 2 4 3) axis=(0 1) keepdims=<true>)<line_sep>_test((5 2 4 3) axis=(0 2))<line_sep>_test((5 2 4 3) axis=(0 2) keepdims=<true>)<block_end>
<import_from_stmt>skimage data<import_from_stmt>skimage.viewer CollectionViewer<import_from_stmt>skimage.transform pyramid_gaussian<line_sep>img=data.lena()<line_sep>img_collection=tuple(pyramid_gaussian(img))<line_sep>view=CollectionViewer(img_collection)<line_sep>view.show()<line_sep>
<import_from_stmt>app.tests test_kernel<line_sep>@test_kernel.container.register('another_test_plugin' tags=['plugin'])<class_stmt>AnotherTestPlugin(object)<block_start><pass><block_end>
<import_from_future_stmt> absolute_import<import_stmt>numpy<as>np<import_from_stmt>targets.marshalling.marshaller Marshaller<import_from_stmt>targets.target_config FileFormat<class_stmt>NumpyArrayMarshaller(Marshaller)<block_start>type=np.ndarray<line_sep>file_format=FileFormat.numpy<def_stmt>target_to_value self target **kwargs<block_start>""" :param obj: object to pickle :return: """<with_stmt>target.open("rb")<as>fp<block_start><return>np.load(fp **kwargs)<block_end><block_end><def_stmt>value_to_target self value target **kwargs<block_start>""" :param obj: object to pickle :return: """<line_sep>target.mkdir_parent()<with_stmt>target.open("wb")<as>fp<block_start>np.save(fp value **kwargs)<block_end><block_end><block_end><class_stmt>NumpyArrayPickleMarshaler(NumpyArrayMarshaller)<block_start>file_format=FileFormat.pickle<def_stmt>target_to_value self target **kwargs<block_start><return>np.load(target.path allow_pickle=<true> **kwargs)<block_end><def_stmt>value_to_target self value target **kwargs<block_start>np.save(target.path value allow_pickle=<true> **kwargs)<block_end><block_end>
<import_stmt>esphome.codegen<as>cg<import_stmt>esphome.config_validation<as>cv<import_from_stmt>esphome.components light output<import_from_stmt>esphome.const CONF_OUTPUT_ID CONF_OUTPUT<line_sep>monochromatic_ns=cg.esphome_ns.namespace("monochromatic")<line_sep>MonochromaticLightOutput=monochromatic_ns.class_("MonochromaticLightOutput" light.LightOutput)<line_sep>CONFIG_SCHEMA=light.BRIGHTNESS_ONLY_LIGHT_SCHEMA.extend({cv.GenerateID(CONF_OUTPUT_ID):cv.declare_id(MonochromaticLightOutput) cv.Required(CONF_OUTPUT):cv.use_id(output.FloatOutput) })<async_keyword><def_stmt>to_code config<block_start>var=cg.new_Pvariable(config[CONF_OUTPUT_ID])<line_sep><await>light.register_light(var config)<line_sep>out=<await>cg.get_variable(config[CONF_OUTPUT])<line_sep>cg.add(var.set_output(out))<block_end>
<import_stmt>argparse<import_stmt>typing<import_stmt>aztk.spark<import_from_stmt>aztk_cli config<import_from_stmt>aztk_cli.config JobConfig<def_stmt>setup_parser parser:argparse.ArgumentParser<block_start>parser.add_argument("--id" dest="job_id" required=<false> help="The unique id of your Spark Job. Defaults to the id value in .aztk/job.yaml" )<line_sep>parser.add_argument("--configuration" "-c" dest="job_conf" required=<false> help="Path to the job.yaml configuration file. Defaults to .aztk/job.yaml" )<block_end><def_stmt>execute args:typing.NamedTuple<block_start>spark_client=aztk.spark.Client(config.load_aztk_secrets())<line_sep>job_conf=JobConfig()<line_sep>job_conf.merge(args.job_id args.job_conf)<line_sep># by default, load spark configuration files in .aztk/ spark_configuration=config.load_aztk_spark_config()<line_sep># overwrite with values in job_conf if they exist <if_stmt>job_conf.spark_defaults_conf<block_start>spark_configuration.spark_defaults_conf=job_conf.spark_defaults_conf<block_end><if_stmt>job_conf.spark_env_sh<block_start>spark_configuration.spark_env_sh=job_conf.spark_env_sh<block_end><if_stmt>job_conf.core_site_xml<block_start>spark_configuration.core_site_xml=job_conf.core_site_xml<block_end>job_configuration=aztk.spark.models.JobConfiguration(id=job_conf.id applications=job_conf.applications spark_configuration=spark_configuration vm_size=job_conf.vm_size toolkit=job_conf.toolkit max_dedicated_nodes=job_conf.max_dedicated_nodes max_low_pri_nodes=job_conf.max_low_pri_nodes subnet_id=job_conf.subnet_id worker_on_master=job_conf.worker_on_master scheduling_target=job_conf.scheduling_target )<line_sep># TODO: utils.print_job_conf(job_configuration) spark_client.job.submit(job_configuration)<block_end>
""" change comments on black jack record """<import_from_stmt>yoyo step<line_sep>__depends__={'20211103_04_Y0xbO-remove-uuid-s-primary-key-on-black-jack-record'}<line_sep>steps=[step("ALTER TABLE `blackJackGameRecord` CHANGE `status` `status` INT NOT NULL COMMENT '0 represent in progress; 1 represent lose; 2 represent win; 3 represent draw; 4 represent closed; ';")]<line_sep>
<import_from_stmt>geosnap analyze<line_sep>linc=analyze.incs.linc<def_stmt>test_linc <block_start>labels_0=[1 1 1 1 2 2 3 3 3 4]<line_sep>labels_1=[1 1 1 1 1 2 3 3 3 4]<line_sep>res=linc([labels_0 labels_1])<assert_stmt>res[4]<eq>1.0<assert_stmt>res[7]<eq>0.0<eq>res[-1]<line_sep>labels_2=[1 1 1 1 1 2 3 3 3 4]<line_sep>res=linc([labels_1 labels_2])<assert_stmt>res[0]<eq>0.0<line_sep>res=linc([labels_0 labels_1 labels_2])<assert_stmt>res[0]<eq>0.25<block_end>
<import_stmt>tarfile<import_stmt>os<line_sep>os.mkdir('work')<with_stmt>tarfile.open('work.tar' 'r')<as>t<block_start>t.extractall('work')<block_end>print(os.listdir('work'))<line_sep>
# Python Standard Library Imports <import_stmt>time<import_from_stmt>rauth OAuth1Service<import_from_stmt>rauth.service process_token_request<import_from_stmt>rauth.utils parse_utf8_qsl<line_sep>YAHOO_OAUTH_REQUEST_TOKEN_URL='https://api.login.yahoo.com/oauth/v2/get_request_token'<line_sep>YAHOO_OAUTH_ACCESS_TOKEN_URL='https://api.login.yahoo.com/oauth/v2/get_token'<line_sep>YAHOO_OAUTH_AUTHORIZE_URL='https://api.login.yahoo.com/oauth/v2/request_auth'<def_stmt>refresh_token_if_needed func<block_start>"""Decorator to make sure we refresh the token if needed before every query """<def_stmt>keys_from_response text<block_start>return_array=[]<line_sep>response_array=text.split('&')<for_stmt>e response_array<block_start>pair=e.split('=' 2)<line_sep>return_array.append(pair[0])<block_end><return>return_array<block_end><def_stmt>refresh self *args **kwargs<block_start>""" `self` is an instance of YahooOAuthClient """<line_sep># Let's refresh 5 minutes before the expiration time expires=self.user_social_auth.extra_data['expires']<line_sep>expires_time=int(expires)-300<if>expires<else>0<line_sep>now=int(time.time())<line_sep># print('comparing n: {0} vs expire: {1}'.format(now, expires)) <if_stmt>expires<is><none><or>expires<l>now#print('------ Refreshing Token ------') <block_start>r=self.oauth.get_raw_access_token(request_token=self.access_token['oauth_token'] request_token_secret=self.access_token['oauth_token_secret'] params={'oauth_session_handle':self.access_token['oauth_session_handle']} )<line_sep>keys=keys_from_response(r.text)<line_sep>access_token=process_token_request(r parse_utf8_qsl *keys)<for_stmt>i,k enumerate(keys)<block_start>self.access_token[k]=access_token[i]<block_end># Save back to UserSocialAuth Model self.user_social_auth.extra_data['access_token']=self.access_token<line_sep>current_time=int(time.time())<line_sep>self.user_social_auth.extra_data['expires']=current_time+int(self.access_token['oauth_expires_in'])<line_sep># print('current time: {0}, expiring oauth at {1}'.format(current_time, self.user_social_auth.extra_data['expires'])) self.user_social_auth.save()<line_sep>token=(self.access_token['oauth_token'] self.access_token['oauth_token_secret'])<line_sep>self.session=self.oauth.get_session(token)<block_end><return>func(self *args **kwargs)<block_end><return>refresh<block_end><class_stmt>YahooOAuthClient(object)<block_start><def_stmt>__init__ self app_key app_secret user_social_auth<block_start>"""Constructor for YahooOAuthClient `app_key` - Yahoo App Key `app_secret` - Yahoo App Secret `user_social_auth` - UserSocialAuth model to store refreshed token """<line_sep># UserSocialAuth needed to access the access token self.last_error=<none><line_sep>self.user_social_auth=user_social_auth<line_sep>self.access_token=user_social_auth.extra_data.get('access_token')<line_sep>self.oauth=OAuth1Service(name='Yahoo' consumer_key=app_key consumer_secret=app_secret request_token_url=YAHOO_OAUTH_REQUEST_TOKEN_URL access_token_url=YAHOO_OAUTH_ACCESS_TOKEN_URL authorize_url=YAHOO_OAUTH_AUTHORIZE_URL )<line_sep>self.session=self.oauth.get_session((self.access_token['oauth_token'] self.access_token['oauth_token_secret']))<block_end><block_end>
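# Usage sketch (not part of the original module; the subclass name, method name and endpoint URL below are illustrative assumptions): any API call wrapped with refresh_token_if_needed gets its OAuth1 session refreshed shortly before expiry and is then issued through self.session. <class_stmt>YahooFantasyClient(YahooOAuthClient)<block_start>@refresh_token_if_needed<def_stmt>get_user_profile self<block_start># session is a rauth OAuth1Session (a requests.Session subclass) <return>self.session.get('https://social.yahooapis.com/v1/user/me/profile' params={'format':'json'})<block_end><block_end>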
<import_stmt>json<import_from_stmt>urllib.parse urlencode<import_from_stmt>.oauth OAuth2Test<class_stmt>StackoverflowOAuth2Test(OAuth2Test)<block_start>backend_path='social_core.backends.stackoverflow.StackoverflowOAuth2'<line_sep>user_data_url='https://api.stackexchange.com/2.1/me'<line_sep>expected_username='foobar'<line_sep>access_token_body=urlencode({'access_token':'foobar' 'token_type':'bearer'})<line_sep>user_data_body=json.dumps({'items':[{'user_id':101010 'user_type':'registered' 'creation_date':1278525551 'display_name':'foobar' 'profile_image':'http: //www.gravatar.com/avatar/'<concat>'5280f15cedf540b544eecc30fcf3027c?'<concat>'d=identicon&r=PG' 'reputation':547 'reputation_change_day':0 'reputation_change_week':0 'reputation_change_month':0 'reputation_change_quarter':65 'reputation_change_year':65 'age':22 'last_access_date':1363544705 'last_modified_date':1354035327 'is_employee':<false> 'link':'http: //stackoverflow.com/users/101010/foobar' 'location':'Fooland' 'account_id':101010 'badge_counts':{'gold':0 'silver':3 'bronze':6}}] 'quota_remaining':9997 'quota_max':10000 'has_more':<false>})<def_stmt>test_login self<block_start>self.do_login()<block_end><def_stmt>test_partial_pipeline self<block_start>self.do_partial_pipeline()<block_end><block_end>
<import_stmt>sys<import_stmt>unittest<import_stmt>coverage<line_sep>cov=coverage.Coverage(branch=<true> source=['animation_retarget'] )<line_sep>cov.start()<line_sep>suite=unittest.defaultTestLoader.discover('.')<if_stmt><not>unittest.TextTestRunner().run(suite).wasSuccessful()<block_start>exit(1)<block_end>cov.stop()<line_sep>cov.xml_report()<if_stmt>'--save-html-report'<in>sys.argv<block_start>cov.html_report()<block_end>
#! /usr/bin/python3 <import_from_stmt>pyln.spec.bolt7 channel_announcement channel_update node_announcement <import_from_stmt>pyln.proto ShortChannelId PublicKey<import_from_stmt>typing Any Dict List Optional Union cast<import_stmt>io<import_stmt>struct<line_sep># These duplicate constants in lightning/common/gossip_store.h GOSSIP_STORE_VERSION=9<line_sep>GOSSIP_STORE_LEN_DELETED_BIT=0x80000000<line_sep>GOSSIP_STORE_LEN_PUSH_BIT=0x40000000<line_sep>GOSSIP_STORE_LEN_MASK=(~(GOSSIP_STORE_LEN_PUSH_BIT|GOSSIP_STORE_LEN_DELETED_BIT))<line_sep># These duplicate constants in lightning/gossipd/gossip_store_wiregen.h WIRE_GOSSIP_STORE_PRIVATE_CHANNEL=4104<line_sep>WIRE_GOSSIP_STORE_PRIVATE_UPDATE=4102<line_sep>WIRE_GOSSIP_STORE_DELETE_CHAN=4103<line_sep>WIRE_GOSSIP_STORE_ENDED=4105<line_sep>WIRE_GOSSIP_STORE_CHANNEL_AMOUNT=4101<class_stmt>GossipStoreHeader(object)<block_start><def_stmt>__init__ self buf:bytes<block_start>length,self.crc,self.timestamp=struct.unpack('>III' buf)<line_sep>self.deleted=(length&GOSSIP_STORE_LEN_DELETED_BIT)<ne>0<line_sep>self.length=(length&GOSSIP_STORE_LEN_MASK)<block_end><block_end><class_stmt>GossmapHalfchannel(object)<block_start>"""One direction of a GossmapChannel."""<def_stmt>__init__ self channel:'GossmapChannel' direction:int timestamp:int cltv_expiry_delta:int htlc_minimum_msat:int htlc_maximum_msat:int fee_base_msat:int fee_proportional_millionths:int<block_start>self.channel=channel<line_sep>self.direction=direction<line_sep>self.source=channel.node1<if>direction<eq>0<else>channel.node2<line_sep>self.destination=channel.node2<if>direction<eq>0<else>channel.node1<line_sep>self.timestamp:int=timestamp<line_sep>self.cltv_expiry_delta:int=cltv_expiry_delta<line_sep>self.htlc_minimum_msat:int=htlc_minimum_msat<line_sep>self.htlc_maximum_msat:Optional[int]=htlc_maximum_msat<line_sep>self.fee_base_msat:int=fee_base_msat<line_sep>self.fee_proportional_millionths:int=fee_proportional_millionths<block_end><def_stmt>__repr__ self<block_start><return>"GossmapHalfchannel[{}x{}]".format(str(self.channel.scid) self.direction)<block_end><block_end><class_stmt>GossmapNodeId(object)<block_start><def_stmt>__init__ self buf:Union[bytes str]<block_start><if_stmt>isinstance(buf str)<block_start>buf=bytes.fromhex(buf)<block_end><if_stmt>len(buf)<ne>33<or>(buf[0]<ne>2<and>buf[0]<ne>3)<block_start><raise>ValueError("{} is not a valid node_id".format(buf.hex()))<block_end>self.nodeid=buf<block_end><def_stmt>to_pubkey self<arrow>PublicKey<block_start><return>PublicKey(self.nodeid)<block_end><def_stmt>__eq__ self other<block_start><if_stmt><not>isinstance(other GossmapNodeId)<block_start><return><false><block_end><return>self.nodeid.__eq__(other.nodeid)<block_end><def_stmt>__lt__ self other<block_start><if_stmt><not>isinstance(other GossmapNodeId)<block_start><raise>ValueError(f"Cannot compare GossmapNodeId with {type(other)}")<block_end><return>self.nodeid.__lt__(other.nodeid)<block_end># yes, that works <def_stmt>__hash__ self<block_start><return>self.nodeid.__hash__()<block_end><def_stmt>__repr__ self<block_start><return>"GossmapNodeId[{}]".format(self.nodeid.hex())<block_end>@classmethod<def_stmt>from_str cls s:str<block_start><if_stmt>s.startswith('0x')<block_start>s=s[2:]<block_end><if_stmt>len(s)<ne>66<block_start><raise>ValueError(f"{s} is not a valid hexstring of a node_id")<block_end><return>cls(bytes.fromhex(s))<block_end><block_end><class_stmt>GossmapChannel(object)<block_start>"""A channel: fields of channel_announcement are in .fields, optional updates are in 
.updates_fields, which can be None if there has been no channel update."""<def_stmt>__init__ self fields:Dict[str Any] announce_offset:int scid node1:'GossmapNode' node2:'GossmapNode' is_private:bool<block_start>self.fields=fields<line_sep>self.announce_offset=announce_offset<line_sep>self.is_private=is_private<line_sep>self.scid=scid<line_sep>self.node1=node1<line_sep>self.node2=node2<line_sep>self.updates_fields:List[Optional[Dict[str Any]]]=[<none> <none>]<line_sep>self.updates_offset:List[Optional[int]]=[<none> <none>]<line_sep>self.satoshis=<none><line_sep>self.half_channels:List[Optional[GossmapHalfchannel]]=[<none> <none>]<block_end><def_stmt>_update_channel self direction:int fields:Dict[str Any] off:int<block_start>self.updates_fields[direction]=fields<line_sep>self.updates_offset[direction]=off<line_sep>half=GossmapHalfchannel(self direction fields['timestamp'] fields['cltv_expiry_delta'] fields['htlc_minimum_msat'] fields.get('htlc_maximum_msat' <none>) fields['fee_base_msat'] fields['fee_proportional_millionths'])<line_sep>self.half_channels[direction]=half<block_end><def_stmt>get_direction self direction:int<block_start>""" returns the GossmapHalfchannel if known by channel_update """<if_stmt><not>0<le>direction<le>1<block_start><raise>ValueError("direction can only be 0 or 1")<block_end><return>self.half_channels[direction]<block_end><def_stmt>__repr__ self<block_start><return>"GossmapChannel[{}]".format(str(self.scid))<block_end><block_end><class_stmt>GossmapNode(object)<block_start>"""A node: fields of node_announcement are in .announce_fields, which can be None if there has been no node announcement. .channels is a list of the GossmapChannels attached to this node. """<def_stmt>__init__ self node_id:Union[GossmapNodeId bytes str]<block_start><if_stmt>isinstance(node_id bytes)<or>isinstance(node_id str)<block_start>node_id=GossmapNodeId(node_id)<block_end>self.announce_fields:Optional[Dict[str Any]]=<none><line_sep>self.announce_offset:Optional[int]=<none><line_sep>self.channels:List[GossmapChannel]=[]<line_sep>self.node_id=node_id<block_end><def_stmt>__repr__ self<block_start><return>"GossmapNode[{}]".format(self.node_id.nodeid.hex())<block_end><def_stmt>__eq__ self other<block_start><if_stmt><not>isinstance(other GossmapNode)<block_start><return><false><block_end><return>self.node_id.__eq__(other.node_id)<block_end><def_stmt>__lt__ self other<block_start><if_stmt><not>isinstance(other GossmapNode)<block_start><raise>ValueError(f"Cannot compare GossmapNode with {type(other)}")<block_end><return>self.node_id.__lt__(other.node_id)<block_end><block_end><class_stmt>Gossmap(object)<block_start>"""Class to represent the gossip map of the network"""<def_stmt>__init__ self store_filename:str="gossip_store"<block_start>self.store_filename=store_filename<line_sep>self.store_file=open(store_filename "rb")<line_sep>self.store_buf=bytes()<line_sep>self.nodes:Dict[GossmapNodeId GossmapNode]={}<line_sep>self.channels:Dict[ShortChannelId GossmapChannel]={}<line_sep>self._last_scid:Optional[str]=<none><line_sep>version=self.store_file.read(1)<if_stmt>version[0]<ne>GOSSIP_STORE_VERSION<block_start><raise>ValueError("Invalid gossip store version {}".format(int(version)))<block_end>self.bytes_read=1<line_sep>self.refresh()<block_end><def_stmt>_new_channel self fields:Dict[str Any] announce_offset:int scid:ShortChannelId node1:GossmapNode node2:GossmapNode is_private:bool<block_start>c=GossmapChannel(fields announce_offset scid node1 node2
is_private)<line_sep>self._last_scid=scid<line_sep>self.channels[scid]=c<line_sep>node1.channels.append(c)<line_sep>node2.channels.append(c)<block_end><def_stmt>_del_channel self scid:ShortChannelId<block_start>c=self.channels[scid]<del_stmt>self.channels[scid]<line_sep>c.node1.channels.remove(c)<line_sep>c.node2.channels.remove(c)<line_sep># Beware self-channels n1-n1! <if_stmt>len(c.node1.channels)<eq>0<and>c.node1<ne>c.node2<block_start><del_stmt>self.nodes[c.node1.node_id]<block_end><if_stmt>len(c.node2.channels)<eq>0<block_start><del_stmt>self.nodes[c.node2.node_id]<block_end><block_end><def_stmt>_add_channel self rec:bytes off:int is_private:bool<block_start>fields=channel_announcement.read(io.BytesIO(rec[2:]) {})<line_sep># Add nodes on the fly node1_id=GossmapNodeId(fields['node_id_1'])<line_sep>node2_id=GossmapNodeId(fields['node_id_2'])<if_stmt>node1_id<not><in>self.nodes<block_start>self.nodes[node1_id]=GossmapNode(node1_id)<block_end><if_stmt>node2_id<not><in>self.nodes<block_start>self.nodes[node2_id]=GossmapNode(node2_id)<block_end>self._new_channel(fields off ShortChannelId.from_int(fields['short_channel_id']) self.get_node(node1_id) self.get_node(node2_id) is_private)<block_end><def_stmt>_set_channel_amount self rec:bytes<block_start>""" Sets channel capacity of last added channel """<line_sep>sats,=struct.unpack(">Q" rec[2:])<line_sep>self.channels[self._last_scid].satoshis=sats<block_end><def_stmt>get_channel self short_channel_id:ShortChannelId<block_start>""" Resolves a channel by its short channel id """<if_stmt>isinstance(short_channel_id str)<block_start>short_channel_id=ShortChannelId.from_str(short_channel_id)<block_end><return>self.channels.get(short_channel_id)<block_end><def_stmt>get_node self node_id:Union[GossmapNodeId str]<block_start>""" Resolves a node by its public key node_id """<if_stmt>isinstance(node_id str)<block_start>node_id=GossmapNodeId.from_str(node_id)<block_end><return>self.nodes.get(cast(GossmapNodeId node_id))<block_end><def_stmt>_update_channel self rec:bytes off:int<block_start>fields=channel_update.read(io.BytesIO(rec[2:]) {})<line_sep>direction=fields['channel_flags']&1<line_sep>c=self.channels[ShortChannelId.from_int(fields['short_channel_id'])]<line_sep>c._update_channel(direction fields off)<block_end><def_stmt>_add_node_announcement self rec:bytes off:int<block_start>fields=node_announcement.read(io.BytesIO(rec[2:]) {})<line_sep>node_id=GossmapNodeId(fields['node_id'])<line_sep>self.nodes[node_id].announce_fields=fields<line_sep>self.nodes[node_id].announce_offset=off<block_end><def_stmt>reopen_store self<block_start>"""FIXME: Implement!"""<assert_stmt><false><block_end><def_stmt>_remove_channel_by_deletemsg self rec:bytes<block_start>scidint,=struct.unpack(">Q" rec[2:])<line_sep>scid=ShortChannelId.from_int(scidint)<line_sep># It might have already been deleted when we skipped it. <if_stmt>scid<in>self.channels<block_start>self._del_channel(scid)<block_end><block_end><def_stmt>_pull_bytes self length:int<arrow>bool<block_start>"""Pull bytes from file into our internal buffer"""<if_stmt>len(self.store_buf)<l>length<block_start>self.store_buf<augadd>self.store_file.read(length-len(self.store_buf))<block_end><return>len(self.store_buf)<ge>length<block_end><def_stmt>_read_record self<arrow>Optional[bytes]<block_start>"""If a whole record is not in the file, returns None.
If deleted, returns empty."""<if_stmt><not>self._pull_bytes(12)<block_start><return><none><block_end>hdr=GossipStoreHeader(self.store_buf[:12])<if_stmt><not>self._pull_bytes(12+hdr.length)<block_start><return><none><block_end>self.bytes_read<augadd>len(self.store_buf)<line_sep>ret=self.store_buf[12:]<line_sep>self.store_buf=bytes()<if_stmt>hdr.deleted<block_start>ret=bytes()<block_end><return>ret<block_end><def_stmt>refresh self<block_start>"""Catch up with any changes to the gossip store"""<while_stmt><true><block_start>off=self.bytes_read<line_sep>rec=self._read_record()<line_sep># EOF? <if_stmt>rec<is><none><block_start><break><block_end># Deleted? <if_stmt>len(rec)<eq>0<block_start><continue><block_end>rectype,=struct.unpack(">H" rec[:2])<if_stmt>rectype<eq>channel_announcement.number<block_start>self._add_channel(rec off <false>)<block_end><elif_stmt>rectype<eq>WIRE_GOSSIP_STORE_PRIVATE_CHANNEL<block_start>self._add_channel(rec[2+8+2:] off+2+8+2 <true>)<block_end><elif_stmt>rectype<eq>WIRE_GOSSIP_STORE_CHANNEL_AMOUNT<block_start>self._set_channel_amount(rec)<block_end><elif_stmt>rectype<eq>channel_update.number<block_start>self._update_channel(rec off)<block_end><elif_stmt>rectype<eq>WIRE_GOSSIP_STORE_PRIVATE_UPDATE<block_start>self._update_channel(rec[2+2:] off+2+2)<block_end><elif_stmt>rectype<eq>WIRE_GOSSIP_STORE_DELETE_CHAN<block_start>self._remove_channel_by_deletemsg(rec)<block_end><elif_stmt>rectype<eq>node_announcement.number<block_start>self._add_node_announcement(rec off)<block_end><elif_stmt>rectype<eq>WIRE_GOSSIP_STORE_ENDED<block_start>self.reopen_store()<block_end><else_stmt><block_start><continue><block_end><block_end><block_end><block_end>
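# Usage sketch (illustrative only, not from the original file; the short channel id and node id strings below are placeholders): load a gossip_store file, look up entries by id, and call refresh() later to pick up newly appended gossip. <if_stmt>__name__<eq>'__main__'<block_start>g=Gossmap("gossip_store")<line_sep>print(len(g.channels) len(g.nodes))<line_sep>chan=g.get_channel("700000x1000x0")<line_sep>node=g.get_node("02"+"11"<times>32)<line_sep>g.refresh()<block_end>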
"""Unit tests for interface implementations in pyatv.protocols.mrp."""<import_stmt>math<import_stmt>pytest<import_from_stmt>pyatv exceptions<import_from_stmt>pyatv.protocols.mrp MrpAudio messages protobuf<line_sep>DEVICE_UID="F2204E63-BCAB-4941-80A0-06C46CB71391"<line_sep># This mock is _extremely_ basic, so needs to be adjusted heavily when adding # new tests <class_stmt>MrpProtocolMock<block_start><def_stmt>__init__ self<block_start>self._listeners={}<line_sep>self.sent_messages=[]<block_end><def_stmt>add_listener self listener message_type data=<none><block_start>self._listeners[message_type]=listener<block_end><async_keyword><def_stmt>send self message<block_start>self.sent_messages.append(message)<block_end><async_keyword><def_stmt>inject self message<block_start><await>self._listeners[message.type](message <none>)<block_end><async_keyword><def_stmt>volume_controls_changed self device_uid controls_available<block_start>message=messages.create(protobuf.VOLUME_CONTROL_CAPABILITIES_DID_CHANGE_MESSAGE)<line_sep>message.inner().outputDeviceUID=device_uid<line_sep>message.inner().capabilities.volumeControlAvailable=controls_available<line_sep><await>self.inject(message)<block_end><block_end>@pytest.fixture(name="protocol")<def_stmt>protocol_fixture event_loop<block_start><yield>MrpProtocolMock()<block_end># MrpAudio @pytest.fixture(name="audio")<def_stmt>audio_fixture protocol<block_start><yield>MrpAudio(protocol)<block_end><async_keyword><def_stmt>test_audio_volume_control_availability protocol audio<block_start><assert_stmt><not>audio.is_available<line_sep><await>protocol.volume_controls_changed(DEVICE_UID <true>)<assert_stmt>audio.is_available<line_sep><await>protocol.volume_controls_changed(DEVICE_UID <false>)<assert_stmt><not>audio.is_available<block_end>@pytest.mark.parametrize("device_uid,controls_available,controls_expected" [(DEVICE_UID <true> <true>) ] )<async_keyword><def_stmt>test_audio_volume_control_capabilities_changed protocol audio device_uid controls_available controls_expected<block_start><assert_stmt><not>audio.is_available<line_sep><await>protocol.volume_controls_changed(device_uid controls_available)<assert_stmt>audio.is_available<eq>controls_expected<block_end>@pytest.mark.parametrize("device_uid,volume,expected_volume" [("foo" 0.2 0.0) # deviceUID mismatch => no update (DEVICE_UID 0.2 20.0) # deviceUID matches => update ] )<async_keyword><def_stmt>test_audio_volume_did_change protocol audio device_uid volume expected_volume<block_start><await>protocol.volume_controls_changed(DEVICE_UID <true>)<assert_stmt>math.isclose(audio.volume 0.0)<line_sep>message=messages.create(protobuf.VOLUME_DID_CHANGE_MESSAGE)<line_sep>message.inner().outputDeviceUID=device_uid<line_sep>message.inner().volume=volume<line_sep><await>protocol.inject(message)<assert_stmt>math.isclose(audio.volume expected_volume)<block_end><async_keyword><def_stmt>test_audio_set_volume protocol audio<block_start><await>protocol.volume_controls_changed(DEVICE_UID <true>)<line_sep><await>audio.set_volume(0.0)<assert_stmt>len(protocol.sent_messages)<eq>1<line_sep>message=protocol.sent_messages.pop()<assert_stmt>message.type<eq>protobuf.SET_VOLUME_MESSAGE<assert_stmt>message.inner().outputDeviceUID<eq>DEVICE_UID<assert_stmt>math.isclose(message.inner().volume 0.0 rel_tol=1e-02)<block_end><async_keyword><def_stmt>test_audio_set_volume_no_output_device audio<block_start><with_stmt>pytest.raises(exceptions.ProtocolError)<block_start><await>audio.set_volume(10)<block_end><block_end>
PROCESSOR_VERSION="0.7.0"<line_sep># Entities AREAS="areas"<line_sep>CAMERAS="cameras"<line_sep>ALL_AREAS="ALL"<line_sep># Metrics OCCUPANCY="occupancy"<line_sep>SOCIAL_DISTANCING="social-distancing"<line_sep>FACEMASK_USAGE="facemask-usage"<line_sep>IN_OUT="in-out"<line_sep>DWELL_TIME="dwell-time"<line_sep>
<import_from_stmt>django.test TestCase<import_from_stmt>django.contrib.formtools.tests.wizard.storage TestStorage<import_from_stmt>django.contrib.formtools.wizard.storage.session SessionStorage<class_stmt>TestSessionStorage(TestStorage TestCase)<block_start><def_stmt>get_storage self<block_start><return>SessionStorage<block_end><block_end>
<import_from_future_stmt> absolute_import unicode_literals<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>events.classes Event<line_sep>event_document_auto_check_in=Event(name='checkouts_document_auto_check_in' label=_('Document automatically checked in'))<line_sep>event_document_check_in=Event(name='checkouts_document_check_in' label=_('Document checked in'))<line_sep>event_document_check_out=Event(name='checkouts_document_check_out' label=_('Document checked out'))<line_sep>event_document_forceful_check_in=Event(name='checkouts_document_forceful_check_in' label=_('Document forcefully checked in'))<line_sep>
<import_stmt>sys<line_sep>sys.path.append('.')<line_sep>sys.path.append('..')<import_from_stmt>networks.engine.eval_manager Evaluator<import_stmt>importlib<def_stmt>main <block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser(description="Test CFBI")<line_sep>parser.add_argument('--gpu_id' type=int default=7)<line_sep>parser.add_argument('--config' type=str default='configs.resnet101_cfbi')<line_sep>parser.add_argument('--ckpt_path' type=str default='test')<line_sep>args=parser.parse_args()<line_sep>config=importlib.import_module(args.config)<line_sep>cfg=config.cfg<line_sep>cfg.TEST_GPU_ID=args.gpu_id<line_sep>cfg.TEST_DATASET='test'<line_sep>cfg.TEST_CKPT_PATH=args.ckpt_path<line_sep>cfg.TEST_MULTISCALE=[0.5 1]<line_sep>cfg.TEST_FLIP=<true><line_sep>evaluator=Evaluator(cfg=cfg)<line_sep>evaluator.evaluating()<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_from_stmt>PyQt5 QtCore QtGui QtWidgets<line_sep>__all__=['QFloatSlider']<class_stmt>QFloatSlider(QtWidgets.QSlider)<block_start>""" Subclass of QtWidgets.QSlider Horizontal slider giving floating point values. Usage: QFloatSlider(min, max, step, default) where min = minimum value of slider max = maximum value of slider step = interval between values. Must be a factor of (max-min) default = default (starting) value of slider """<def_stmt>__init__ self min_value max_value step default<block_start>super().__init__(QtCore.Qt.Horizontal)<line_sep>self.precision=0.0001<line_sep>self.min_value=min_value<line_sep>self.max_value=max_value<line_sep>self.step=step<line_sep>self.default=default<line_sep>self.quotient,self.remainder=self._float_divmod(self.max_value-self.min_value self.step)<if_stmt>self.remainder<block_start><raise>ValueError("{} does not fit evenly between {} and {}".format(step min_value max_value))<block_end>super().setMinimum(0)<line_sep>super().setMaximum(self.quotient)<line_sep>super().setSingleStep(1)<line_sep>super().setValue(self._float_to_int(self.default))<line_sep>super().valueChanged.connect(self._value_handler)<line_sep>#self.slider_value = 2.0 <block_end><def_stmt>setValue self value<block_start>super().setValue(self._float_to_int(value))<block_end># This is mostly disgusting python i hate floating points >:( <def_stmt>_float_divmod self a b<block_start>""" Basically the divmod function but it works for floats (try 0.3 % 0.1 smh) Returns the quotient, and a remainder. """<line_sep>a=abs(a)<line_sep>b=abs(b)<line_sep>n=1<while_stmt><true><block_start>c=a-b<line_sep>c=abs(c)<if_stmt>c<l>self.precision<block_start><return>(n 0)<block_end><elif_stmt>c<g>a<block_start><return>(n-1 a)<block_end>a=c<line_sep>n<augadd>1<block_end><block_end><def_stmt>_float_to_int self a<block_start><return>int(round(a/self.step))<block_end><def_stmt>_int_to_float self a<block_start><return>self.min_value+a<times>self.step<block_end><def_stmt>_value_handler self<block_start>self.slider_value=self._int_to_float(super().value())<block_end><block_end>
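# Minimal usage sketch (assumes a QApplication, which the original module does not create): a slider running from 0.0 to 1.0 in 0.05 steps, defaulting to 0.5, following the constructor signature documented above. <def_stmt>_demo <block_start>app=QtWidgets.QApplication([])<line_sep>slider=QFloatSlider(0.0 1.0 0.05 0.5)<line_sep>slider.setValue(0.25)<line_sep>slider.show()<line_sep>app.exec_()<block_end>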
# -*- coding: utf-8 -*- """ Tic Tac Toe using pygame, numpy and sys with a Graphical User Interface """<import_stmt>pygame<import_stmt>sys<import_from_stmt>pygame.locals *<import_stmt>numpy<as>np<line_sep># ------ # constants # ------- width=800<line_sep>height=800<line_sep># rows and columns board_rows=3<line_sep>board_columns=3<line_sep>cross_width=25<line_sep>square_size=width<floordiv>board_columns<line_sep># colors in RGB format line_Width=15<line_sep>red=(255 0 0)<line_sep>bg_color=(28 170 156)<line_sep>line_color=(23 145 135)<line_sep>circle_color=(239 231 200)<line_sep>cross_color=(66 66 66)<line_sep>space=square_size<floordiv>4<line_sep># circle circle_radius=square_size<floordiv>3<line_sep>circle_width=14<line_sep>pygame.init()<line_sep>screen=pygame.display.set_mode((height width))<line_sep>pygame.display.set_caption('Tic Tac Toe!')<line_sep>screen.fill(bg_color)<line_sep># color to display restart white=(255 255 255)<line_sep>green=(0 255 0)<line_sep>blue=(0 0 128)<line_sep>font=pygame.font.Font('freesansbold.ttf' 25)<line_sep># create a text surface object, # on which the text is drawn. text=font.render('Press R to restart' <true> green blue)<line_sep>Won=font.render(" Won" <true> blue green)<line_sep>leave=font.render("Press X to Exit" <true> white red)<line_sep># create a rectangular object for the # text surface object leaveRect=text.get_rect()<line_sep>textRect=text.get_rect()<line_sep>winRect=Won.get_rect()<line_sep>winRect.center=(100 30)<line_sep>textRect.center=(width-400 30)<line_sep>leaveRect.center=(width-120 30)<line_sep>board=np.zeros((board_rows board_columns))<line_sep># print(board) #pygame.draw.line( screen ,red ,(10,10),(300,300),10) <def_stmt>draw_figures <block_start><for_stmt>row range(board_rows)<block_start><for_stmt>col range(board_columns)<block_start><if_stmt>board[row][col]<eq>1<block_start>pygame.draw.circle(screen circle_color (int(col<times>square_size+square_size<floordiv>2) int(row<times>square_size+square_size<floordiv>2)) circle_radius circle_width)<block_end><elif_stmt>board[row][col]<eq>2<block_start>pygame.draw.line(screen cross_color (col<times>square_size+space row<times>square_size+square_size-space) (col<times>square_size+square_size-space row<times>square_size+space) cross_width)<line_sep>pygame.draw.line(screen cross_color (col<times>square_size+space row<times>square_size+space) (col<times>square_size+square_size-space row<times>square_size+square_size-space) cross_width)<block_end><block_end><block_end><block_end><def_stmt>draw_lines <block_start>pygame.draw.line(screen line_color (0 square_size) (width square_size) line_Width)<line_sep># 2nd horizontal line pygame.draw.line(screen line_color (0 2<times>square_size) (width 2<times>square_size) line_Width)<line_sep># 1st vertical pygame.draw.line(screen line_color (square_size 0) (square_size height) line_Width)<line_sep># 2nd vertical pygame.draw.line(screen line_color (2<times>square_size 0) (2<times>square_size height) line_Width)<block_end># To mark which square player has chosen <def_stmt>mark_square row col player<block_start>board[row][col]=player<block_end># To check the availability of a square <def_stmt>available_square row col<block_start><return>board[row][col]<eq>0<block_end># check board full or not <def_stmt>is_board_full <block_start>k=<false><for_stmt>row range(board_rows)<block_start><for_stmt>col
range(board_columns)<block_start><if_stmt>board[row][col]<eq>0<block_start>k=<false><block_end><else_stmt><block_start>k=<true><block_end><block_end><block_end><return>k<block_end><def_stmt>check_win player# check vertical win <block_start><for_stmt>col range(board_columns)<block_start><if_stmt>board[0][col]<eq>player<and>board[1][col]<eq>player<and>board[2][col]<eq>player<block_start>draw_vertical_winning_line(col player)<line_sep><return><true><block_end><block_end># check Horizontal win <for_stmt>row range(board_rows)<block_start><if_stmt>board[row][0]<eq>player<and>board[row][1]<eq>player<and>board[row][2]<eq>player<block_start>draw_horizontal_winning_line(row player)<line_sep><return><true><block_end><block_end># check for asc win <if_stmt>board[2][0]<eq>player<and>board[1][1]<eq>player<and>board[0][2]<eq>player<block_start>draw_asc_diagonal(player)<line_sep><return><true><block_end><if_stmt>board[0][0]<eq>player<and>board[1][1]<eq>player<and>board[2][2]<eq>player<block_start>draw_des_diagonal(player)<line_sep><return><true><block_end><block_end><def_stmt>draw_horizontal_winning_line row player<block_start>posY=row<times>square_size+square_size<floordiv>2<if_stmt>(player<eq>1)<block_start>color=circle_color<block_end><else_stmt><block_start>color=cross_color<block_end>pygame.draw.line(screen color (15 posY) (width-15 posY) 15)<block_end><def_stmt>draw_vertical_winning_line col player<block_start>posX=col<times>square_size+square_size<floordiv>2<if_stmt>(player<eq>1)<block_start>color=circle_color<block_end><else_stmt><block_start>color=cross_color<block_end>pygame.draw.line(screen color (posX 15) (posX width-15) 15)<block_end><def_stmt>draw_asc_diagonal player<block_start><if_stmt>(player<eq>1)<block_start>color=circle_color<block_end><else_stmt><block_start>color=cross_color<block_end>pygame.draw.line(screen color (15 height-15) (width-15 15) 15)<block_end><def_stmt>draw_des_diagonal player<block_start><if_stmt>(player<eq>1)<block_start>color=circle_color<block_end><else_stmt><block_start>color=cross_color<block_end>pygame.draw.line(screen color (15 15) (width-15 height-15) 15)<block_end><def_stmt>restart <block_start>screen.fill(bg_color)<line_sep>draw_lines()<line_sep>player=1<for_stmt>row range(board_rows)<block_start><for_stmt>col range(board_columns)<block_start>board[row][col]=0<block_end><block_end><block_end>draw_lines()<line_sep># player player=1<line_sep>game_over=<false><while_stmt><true># main game loop <block_start><for_stmt>event pygame.event.get()# constantly looks for the event <block_start><if_stmt>event.type<eq>pygame.QUIT# if user clicks exit pygame.QUIT and sys exits <block_start>pygame.quit()<line_sep>sys.exit()<block_end>board_full=is_board_full()<if_stmt>board_full<and><not>game_over<block_start>Won=font.render(" It's a Tie " <true> blue green)<line_sep>screen.blit(Won winRect)<line_sep>screen.blit(text textRect)<line_sep>screen.blit(leave leaveRect)<block_end><if_stmt>event.type<eq>pygame.MOUSEBUTTONDOWN<and><not>game_over<block_start>mouseX=event.pos[0]# x mouseY=event.pos[1]# y clicked_row=int(mouseY<floordiv>square_size)<line_sep>clicked_column=int(mouseX<floordiv>square_size)<if_stmt>available_square(clicked_row clicked_column)<block_start>mark_square(clicked_row clicked_column player)<if_stmt>(check_win(player))<block_start>game_over=<true><line_sep>Won=font.render("Player"+str(player)+" Won " <true> blue green)<line_sep>screen.blit(Won winRect)<line_sep>screen.blit(text textRect)<line_sep>screen.blit(leave
leaveRect)<block_end>player=player%2+1<if_stmt><not>game_over<and><not>board_full<block_start>Won=font.render("Player"+str(player)+" Turn " <true> blue green)<line_sep>screen.blit(Won winRect)<block_end>draw_figures()<block_end><block_end># to restart the game <if_stmt>event.type<eq>pygame.KEYDOWN<block_start><if_stmt>event.key<eq>pygame.K_r<block_start>restart()<line_sep>game_over=<false><block_end><elif_stmt>event.key<eq>pygame.K_x<block_start>pygame.quit()<line_sep>sys.exit()<line_sep># print(board) <block_end><block_end><block_end>pygame.display.update()<block_end>
""" Module for guiding construction of the Wavelength Image .. include common links, assuming primary doc root is up one directory .. include:: ../links.rst """<import_stmt>inspect<import_stmt>numpy<as>np<import_stmt>os<import_from_stmt>pypeit msgs<import_from_stmt>pypeit utils<import_from_stmt>pypeit datamodel<import_from_stmt>IPython embed<class_stmt>WaveImage(datamodel.DataContainer)<block_start>version='1.0.0'<line_sep># I/O output_to_disk=<none>#('WVTILTS_IMAGE', 'WVTILTS_FULLMASK', 'WVTILTS_DETECTOR_CONTAINER') hdu_prefix=<none><line_sep># Master fun master_type='Wave'<line_sep>master_file_format='fits'<line_sep>datamodel={'image':dict(otype=np.ndarray atype=np.floating desc='2D Wavelength image') 'PYP_SPEC':dict(otype=str desc='PypeIt spectrograph name') }<def_stmt>__init__ self image PYP_SPEC=<none># Parse <block_start>args,_,_,values=inspect.getargvalues(inspect.currentframe())<line_sep>d=dict([(k values[k])<for>k args[1:]])<line_sep># Setup the DataContainer datamodel.DataContainer.__init__(self d=d)<block_end><block_end><class_stmt>BuildWaveImage(object)<block_start>""" Class to generate the Wavelength Image Args: slits (:class:`pypeit.edgetrace.SlitTraceSet`): Object holding the slit edge locations tilts (np.ndarray or None): Tilt image wv_calib (dict or None): wavelength solution dictionary Parameters are read from wv_calib['par'] spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The `Spectrograph` instance that sets the instrument used to take the observations. Used to set :attr:`spectrograph`. det (int or None): Attributes: image (np.ndarray): Wavelength image steps (list): List of the processing steps performed """<line_sep>master_type='Wave'<line_sep># @classmethod # def from_master_file(cls, master_file): # """ # # Args: # master_file (str): # # Returns: # waveimage.WaveImage: # # """ # # Spectrograph # spectrograph, extras = masterframe.items_from_master_file(master_file) # head0 = extras[0] # # Master info # master_dir = head0['MSTRDIR'] # master_key = head0['MSTRKEY'] # # Instantiate # slf = cls(None, None, None, spectrograph, None, master_dir=master_dir, # master_key=master_key, reuse_masters=True) # slf.image = slf.load(ifile=master_file) # # Return # return slf # TODO: Is maskslits ever anything besides slits.mask? (e.g., see calibrations.py call) <def_stmt>__init__ self slits tilts wv_calib spectrograph det# MasterFrame #masterframe.MasterFrame.__init__(self, self.master_type, master_dir=master_dir, # master_key=master_key, reuse_masters=reuse_masters) # Required parameters <block_start>self.spectrograph=spectrograph<line_sep>self.det=det<line_sep># TODO: Do we need to assign slits to self? self.slits=slits<line_sep>self.tilts=tilts<line_sep>self.wv_calib=wv_calib<if_stmt>self.slits<is><none><block_start>self.slitmask=<none><line_sep>self.slit_spat_pos=<none><block_end><else_stmt># NOTE: This uses the pad defined by EdgeTraceSetPar <block_start>self.slitmask=self.slits.slit_img()<line_sep># This selects the coordinates for the tweaked edges if # they exist, original otherwise. self.slit_spat_pos=self.slits.spatial_coordinates()<block_end># For echelle order, primarily # TODO: only echelle is ever used. Do we need to keep the whole # thing? self.par=wv_calib['par']<if>wv_calib<is><not><none><else><none><line_sep># Main output self.image=<none><line_sep>self.steps=[]<block_end><def_stmt>build_wave self<block_start>""" Main algorithm to build the wavelength image Returns: `numpy.ndarray`_: The wavelength image. 
"""<line_sep># Loop on slits ok_slits=np.where(np.invert(self.slits.mask))[0]<line_sep>self.image=np.zeros_like(self.tilts)<line_sep>nspec=self.slitmask.shape[0]<line_sep># Error checking on the wv_calib #if (nspec-1) != int(self.wv_calib[str(0)]['fmax']): # msgs.error('Your wavelength fits used inconsistent normalization. Something is wrong!') # If this is echelle print out a status message and do some error checking <if_stmt>self.par['echelle']<block_start>msgs.info('Evaluating 2-d wavelength solution for echelle....')<if_stmt>len(self.wv_calib['fit2d']['orders'])<ne>len(ok_slits)<block_start>msgs.error('wv_calib and ok_slits do not line up. Something is very wrong!')<block_end><block_end># Unpack some 2-d fit parameters if this is echelle <for_stmt>slit ok_slits<block_start>thismask=(self.slitmask<eq>slit)<if_stmt>self.par['echelle']# TODO: Put this in `SlitTraceSet`? <block_start>order,indx=self.spectrograph.slit2order(self.slit_spat_pos[slit])<line_sep># evaluate solution self.image[thismask]=utils.func_val(self.wv_calib['fit2d']['coeffs'] self.tilts[thismask] self.wv_calib['fit2d']['func2d'] x2=np.ones_like(self.tilts[thismask])<times>order minx=self.wv_calib['fit2d']['min_spec'] maxx=self.wv_calib['fit2d']['max_spec'] minx2=self.wv_calib['fit2d']['min_order'] maxx2=self.wv_calib['fit2d']['max_order'])<line_sep>self.image[thismask]<augdiv>order<block_end><else_stmt><block_start>iwv_calib=self.wv_calib[str(slit)]<line_sep>self.image[thismask]=utils.func_val(iwv_calib['fitc'] self.tilts[thismask] iwv_calib['function'] minx=iwv_calib['fmin'] maxx=iwv_calib['fmax'])<block_end><block_end># Return self.steps.append(inspect.stack()[0][3])<line_sep><return>WaveImage(self.image PYP_SPEC=self.spectrograph.spectrograph)<block_end><def_stmt>__repr__ self# Generate sets string <block_start>txt='<{:s}: >'.format(self.__class__.__name__)<line_sep><return>txt<block_end><block_end>
<import_from_stmt>poliastro.earth.atmosphere.coesa62 COESA62<import_from_stmt>poliastro.earth.atmosphere.coesa76 COESA76<line_sep>__all__=["COESA62" "COESA76"]<line_sep>
<import_from_stmt>rpython.rlib.rstring StringBuilder<import_from_stmt>rpython.rtyper.lltypesystem lltype rffi<import_from_stmt>rpython.rlib.rarithmetic intmask<import_from_stmt>rpython.rlib.rrandom Random<import_from_stmt>hippy.builtin wrap Optional BoolArg StringArg<import_from_stmt>hippy.objects.base W_Root<import_from_stmt>hippy.module.date timelib<import_from_stmt>hippy.module.phpstruct _unpack<import_from_stmt>hippy.phpcompiler compile_php<line_sep>_random=Random()<def_stmt>connection_aborted <block_start>""" Check whether client disconnected"""<line_sep><return>NotImplementedError()<block_end><def_stmt>connection_status <block_start>""" Returns connection status bitfield"""<line_sep><return>NotImplementedError()<block_end><def_stmt>connection_timeout <block_start>""" Check if the script timed out"""<line_sep><return>NotImplementedError()<block_end><def_stmt>_lookup_constant interp constname<block_start>i=constname.find(':')<if_stmt>i<l>0<block_start><return>interp.locate_constant(constname <false>)<block_end><elif_stmt>i+1<l>len(constname)<and>constname[i+1]<eq>':'<block_start>clsname=constname[:i]<line_sep>realname=constname[i+2:]<line_sep>klass=interp.lookup_class_or_intf(clsname)<if_stmt>klass<is><not><none><block_start><return>klass.constants_w.get(realname <none>)<block_end><block_end><block_end>@wrap(['interp' str])<def_stmt>constant interp constname<block_start>""" Returns the value of a constant"""<line_sep>w_obj=_lookup_constant(interp constname)<if_stmt>w_obj<is><none><block_start>interp.warn("constant(): Couldn't find constant %s"%constname)<line_sep><return>interp.space.w_Null<block_end><return>w_obj<block_end>@wrap(['interp' str W_Root Optional(bool)])<def_stmt>define interp name w_obj case_insensitive=<false><block_start>""" Defines a named constant"""<if_stmt>interp.locate_constant(name <false>)<is><not><none><block_start>interp.notice("Constant %s already defined"%name)<line_sep><return>interp.space.w_False<block_end>interp.declare_new_constant(name w_obj)<line_sep><return>interp.space.w_True<block_end>@wrap(['interp' str])<def_stmt>defined interp name<block_start>""" Checks whether a given named constant exists"""<line_sep><return>interp.space.newbool(_lookup_constant(interp name)<is><not><none>)<block_end><def_stmt>die <block_start>""" Equivalent to exit"""<line_sep><return>NotImplementedError()<block_end><def_stmt>exit <block_start>""" Output a message and terminate the current script"""<line_sep><return>NotImplementedError()<block_end><def_stmt>get_browser <block_start>""" Tells what the user's browser is capable of"""<line_sep><return>NotImplementedError()<block_end><def_stmt>__halt_compiler <block_start>""" Halts the compiler execution"""<line_sep><return>NotImplementedError()<block_end><def_stmt>highlight_file <block_start>""" Syntax highlighting of a file"""<line_sep><return>NotImplementedError()<block_end><def_stmt>highlight_string <block_start>""" Syntax highlighting of a string"""<line_sep><return>NotImplementedError()<block_end><def_stmt>ignore_user_abort <block_start>""" Set whether a client disconnect should abort script execution"""<line_sep><return>NotImplementedError()<block_end><def_stmt>pack <block_start>""" Pack data into binary string"""<line_sep><return>NotImplementedError()<block_end><def_stmt>php_check_syntax <block_start>""" Check the PHP syntax of (and execute) the specified file"""<line_sep><return>NotImplementedError()<block_end><def_stmt>php_strip_whitespace <block_start>""" Return source with stripped comments and 
whitespace"""<line_sep><return>NotImplementedError()<block_end><def_stmt>show_source <block_start>""" Alias of highlight_file"""<line_sep><return>NotImplementedError()<block_end>@wrap(['interp' int] error=<false>)<def_stmt>sleep interp seconds<block_start>""" Delay execution"""<if_stmt>seconds<l>0<block_start>interp.warn("sleep(): Number of seconds must be greater than or equal to 0")<line_sep><return>interp.space.w_False<block_end><import_stmt>time<line_sep>time.sleep(seconds)<line_sep># TODO: when a signal is received and a handler is defined, the remaining # number of seconds as float should be returned. <return>interp.space.newint(0)<block_end><def_stmt>sys_getloadavg <block_start>""" Gets system load average"""<line_sep><return>NotImplementedError()<block_end><def_stmt>time_nanosleep <block_start>""" Delay for a number of seconds and nanoseconds"""<line_sep><return>NotImplementedError()<block_end><def_stmt>time_sleep_until <block_start>""" Make the script sleep until the specified time"""<line_sep><return>NotImplementedError()<block_end><def_stmt>_zero_pad s c<block_start>l=len(s)<if_stmt>l<g>c<block_start><return>s<block_end><return>"0"<times>(c-l)+s<block_end>@wrap(['space' Optional(str) Optional(BoolArg(<none>))])<def_stmt>uniqid space prefix='' more_entropy=<false><block_start>""" Generate a unique ID"""<line_sep>timeval=lltype.malloc(timelib.timeval flavor='raw')<line_sep>void=lltype.nullptr(rffi.VOIDP.TO)<line_sep>timelib.c_gettimeofday(timeval void)<line_sep>sec=intmask(timeval.c_tv_sec)<line_sep>usec=intmask(timeval.c_tv_usec)<line_sep>builder=StringBuilder()<if_stmt>prefix<block_start>builder.append(prefix)<block_end>builder.append(_zero_pad(hex(sec)[2:] 8))<line_sep>builder.append(_zero_pad(hex(usec)[2:] 5))<if_stmt>more_entropy<block_start>builder.append(".")<line_sep>builder.append(str(_random.random())[2:11])<block_end><return>space.newstr(builder.build())<block_end>@wrap(['space' StringArg(<none>) StringArg(<none>)])<def_stmt>unpack space formats string<block_start>""" Unpack data from binary string"""<line_sep><return>_unpack(space formats string)<block_end>@wrap(['interp' int])<def_stmt>usleep interp microseconds<block_start>""" Delay execution in microseconds"""<if_stmt>microseconds<l>0<block_start>interp.warn("usleep(): Number of microseconds must be greater than or equal to 0")<line_sep><return>interp.space.w_False<block_end><import_stmt>time<line_sep>time.sleep(microseconds/1000000.0)<block_end>
<import_stmt>whitebox<import_stmt>ast<import_stmt>json<import_stmt>os<import_stmt>sys<line_sep>wbt=whitebox.WhiteboxTools()<line_sep># wbt.set_verbose_mode(True) # print(wbt.version()) # print(wbt.help()) # tools = wbt.list_tools(['dem']) # for index, tool in enumerate(tools): # print("{}. {}: {}".format(index, tool, tools[tool])) # def get_tool_params(tool_name): # out_str = wbt.tool_parameters(tool_name) # start_index = out_str.index('[') + 1 # end_index = len(out_str.strip()) - 2 # params = out_str[start_index : end_index] # print(params) # sub_params = params.split('{"name"') # param_list = [] # for param in sub_params: # param = param.strip() # if len(param) > 0: # item = '"name"' + param # item = item[ : item.rfind("}")].strip() # param_list.append(item) # params_dict = {} # for item in param_list: # print("{}\n".format(item)) # param_dict = {} # index_name = item.find("name") # index_flags = item.find("flags") # index_description = item.find("description") # index_parameter_type = item.find("parameter_type") # index_default_value = item.find("default_value") # index_optional = item.find("optional") # name = item[index_name - 1 : index_flags - 2].replace('"name":', '') # name = name.replace('"', '') # param_dict['name'] = name # flags = item[index_flags - 1 : index_description -2].replace('"flags":', '') # if "--" in flags: # flags = flags.split('--')[1][: -2] # else: # flags = flags.split('-')[1][: -2] # param_dict['flags'] = flags # desc = item[index_description - 1 : index_parameter_type - 2].replace('"description":', '') # desc = desc.replace('"', '') # param_dict['description'] = desc # param_type = item[index_parameter_type - 1 : index_default_value - 2].replace('"parameter_type":', '') # param_type = ast.literal_eval(param_type) # param_dict['parameter_type'] = param_type # default_value = item[index_default_value - 1 : index_optional - 2].replace('"default_value":', '') # param_dict['default_value'] = default_value # optional = item[index_optional - 1 :].replace('"optional":', '') # param_dict['optional'] = optional # params_dict[flags] = param_dict # return params_dict # tool_name = "BreachDepressions" # print(wbt.tool_parameters(tool_name)) # params = get_tool_params(tool_name) # print(params) # print(params.keys()) # print(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) # lines = wbt.list_tools() # print(lines) # # for line in lines: # # print(line) # print(len(lines)) # parameter_types = [] # for param in params: # param_type = params[param]['parameter_type'] # if param_type not in parameter_types: # parameter_types.append(param_type) # print(parameter_types) # thisset = {"apple", "banana", "cherry"} # thisset.add("orange") # print(thisset) # tools = wbt.list_tools() # for index, tool in enumerate(sorted(tools)): # print("{}: {}".format(index, tool)) # dem = "/media/hdd/Dropbox/git/WhiteboxTools-ArcGIS/testdata/DEM.tif" # output = "/media/hdd/Dropbox/git/WhiteboxTools-ArcGIS/testdata/output.tif" # wbt.run_tool("BreachDepressions", '--dem=dem --output=output') # exe_path = "/home/qiusheng/Downloads/WBT/whitebox_tools" # cmd = exe_path + ' --run=BreachDepressions --dem="/media/hdd/Dropbox/git/WhiteboxTools-ArcGIS/testdata/DEM.tif" --output="/media/hdd/Dropbox/git/WhiteboxTools-ArcGIS/testdata/output.tif" -v' # print(os.popen(cmd).read().rstrip()) # ret = wbt.breach_depressions(dem, output) # print(ret) # print(type(ret)) # def redirect_to_file(text): # original = sys.stdout # sys.stdout = open('/media/hdd/Dropbox/git/WhiteboxTools-ArcGIS/WBT/PRE/redirect.txt', 
'w') # # print('This is your redirected text:') # # print(text) # wbt.breach_depressions(dem, output) # sys.stdout = original # print('This string goes to stdout, NOT the file!') # redirect_to_file('Python rocks!') # https://goo.gl/bFo2tD # import sys # if sys.version_info < (3, 0): # from StringIO import StringIO # else: # from io import StringIO # old_stdout = sys.stdout # result = StringIO() # sys.stdout = result # # wbt.breach_depressions(dem, output) # # print("test string") # sys.stdout = old_stdout # result_string = result.getvalue() # print(result_string) # print('--dem="/path/to/DEM.tif" --output="/path/to/output.tif"')
# Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 <import_stmt>arvados<import_stmt>arvados.errors<import_stmt>time<import_stmt>json<while_stmt><true><block_start><try_stmt><block_start>api=arvados.api()<line_sep><break><block_end><except_stmt>arvados.errors.ApiError<block_start>time.sleep(2)<block_end><block_end>existing=api.users().list(filters=[["email" "=" "<EMAIL>"] ["is_active" "=" <true>]] limit=1).execute()<if_stmt>existing["items"]<block_start>u=existing["items"][0]<block_end><else_stmt><block_start>u=api.users().create(body={'first_name':'Test' 'last_name':'User' 'email':'<EMAIL>' 'is_admin':<false>}).execute()<line_sep>api.users().activate(uuid=u["uuid"]).execute()<block_end>tok=api.api_client_authorizations().create(body={"api_client_authorization":{"owner_uuid":u["uuid"]}}).execute()<with_stmt>open("cwl.output.json" "w")<as>f<block_start>json.dump({"test_user_uuid":u["uuid"] "test_user_token":"v2/%s/%s"%(tok["uuid"] tok["api_token"])} f)<block_end>
# ------------------------------------------------------------------------- # # Part of the CodeChecker project, under the Apache License v2.0 with # LLVM Exceptions. See LICENSE for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- """ Parsers for the analyzer output formats (plist ...) should create this Report which will be stored. Multiple bug identification hash-es can be generated. All hash generation algorithms should be documented and implemented here. """<import_stmt>logging<line_sep>LOG=logging.getLogger('ReportConverter')<def_stmt>get_line file_name line_no errors='ignore'<block_start>""" Return the given line from the file. If line_no is larger than the number of lines in the file then empty string returns. If the file can't be opened for read, the function also returns empty string. Try to encode every file as utf-8 to read the line content do not depend on the platform settings. By default locale.getpreferredencoding() is used which depends on the platform. Changing the encoding error handling can influence the hash content! """<try_stmt><block_start><with_stmt>open(file_name mode='r' encoding='utf-8' errors=errors)<as>source_file<block_start><for_stmt>line source_file<block_start>line_no<augsub>1<if_stmt>line_no<eq>0<block_start><return>line<block_end><block_end><return>''<block_end><block_end><except_stmt>IOError<block_start>LOG.error("Failed to open file %s" file_name)<line_sep><return>''<block_end><block_end><def_stmt>remove_whitespace line_content old_col<block_start>""" Removes white spaces from the given line content. This function removes white spaces from the line content parameter and calculates the new line location. Returns the line content without white spaces and the new column number. E.g.: line_content = " int foo = 17; sizeof(43); " ^ |- bug_col = 18 content_begin = " int foo = 17; " content_begin_strip = "intfoo=17;" line_strip_len = 18 - 10 => 8 ''.join(line_content.split()) => "intfoo=17;sizeof(43);" ^ |- until_col - line_strip_len 18 - 8 = 10 """<line_sep>content_begin=line_content[:old_col]<line_sep>content_begin_strip=''.join(content_begin.split())<line_sep>line_strip_len=len(content_begin)-len(content_begin_strip)<line_sep><return>''.join(line_content.split()) old_col-line_strip_len<block_end>
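# Added worked example, not in the original module: exercises remove_whitespace on a short line, mirroring the docstring above; old_col=15 points at the 's' of sizeof. <if_stmt>__name__<eq>"__main__"<block_start>stripped,new_col=remove_whitespace(" int foo = 17; sizeof(43);" 15)<line_sep>print(stripped new_col)# prints "intfoo=17;sizeof(43);" and 10 <block_end>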
"""3D Bar plot of a TOF camera with hexagonal pixels"""<import_from_stmt>vedo *<import_stmt>numpy<as>np<line_sep>settings.defaultFont="Glasgo"<line_sep>settings.useParallelProjection=<true><line_sep>vals=np.abs(np.random.randn(4<times>6))# pixel heights cols=colorMap(vals "summer")<line_sep>k=0<line_sep>items=[__doc__]<for_stmt>i range(4)<block_start><for_stmt>j range(6)<block_start>val,col=vals[k] cols[k]<line_sep>x,y,z=[i+j%2/2 j/1.155 val+0.01]<line_sep>zbar=Polygon([x y 0] nsides=6 r=0.55 c=col).extrude(val)<line_sep>line=Polygon([x y z] nsides=6 r=0.55 c='k').wireframe().lw(2)<line_sep>txt=Text3D(f"{i}/{j}" [x y z] s=.15 c='k' justify='center')<line_sep>items<augadd>[zbar line txt]<line_sep>k<augadd>1<block_end><block_end>show(items axes=7)<line_sep>
<import_from_future_stmt> annotations<import_stmt>asyncio<import_stmt>logging<import_stmt>operator<import_from_stmt>decimal Decimal<import_from_stmt>typing Awaitable<import_stmt>aiohttp<import_from_stmt>..store DataStore DataStoreManager<import_from_stmt>..typedefs Item<import_from_stmt>..ws ClientWebSocketResponse<line_sep>logger=logging.getLogger(__name__)<class_stmt>bitFlyerDataStore(DataStoreManager)<block_start><def_stmt>_init self<arrow><none><block_start>self.create('board' datastore_class=Board)<line_sep>self.create('ticker' datastore_class=Ticker)<line_sep>self.create('executions' datastore_class=Executions)<line_sep>self.create('childorderevents' datastore_class=ChildOrderEvents)<line_sep>self.create('childorders' datastore_class=ChildOrders)<line_sep>self.create('parentorderevents' datastore_class=ParentOrderEvents)<line_sep>self.create('parentorders' datastore_class=ParentOrders)<line_sep>self.create('positions' datastore_class=Positions)<line_sep>self._snapshots=set()<block_end><async_keyword><def_stmt>initialize self *aws:Awaitable[aiohttp.ClientResponse]<arrow><none><block_start><for_stmt>f asyncio.as_completed(aws)<block_start>resp=<await>f<line_sep>data=<await>resp.json()<if_stmt>resp.url.path<eq>'/v1/me/getchildorders'<block_start>self.childorders._onresponse(data)<block_end><elif_stmt>resp.url.path<eq>'/v1/me/getparentorders'<block_start>self.parentorders._onresponse(data)<block_end><elif_stmt>resp.url.path<eq>'/v1/me/getpositions'<block_start>self.positions._onresponse(data)<block_end><block_end><block_end><def_stmt>_onmessage self msg:Item ws:ClientWebSocketResponse<arrow><none><block_start><if_stmt>'error'<in>msg<block_start>logger.warning(msg)<block_end><if_stmt>'params'<in>msg<block_start>channel:str=msg['params']['channel']<line_sep>message=msg['params']['message']<if_stmt>channel.startswith('lightning_board_')<block_start><if_stmt>channel.startswith('lightning_board_snapshot_')<block_start>asyncio.create_task(ws.send_json({'method':'unsubscribe' 'params':{'channel':channel} }))<line_sep>product_code=channel.replace('lightning_board_snapshot_' '')<line_sep>self.board._delete(self.board.find({'product_code':product_code}))<line_sep>self._snapshots.add(product_code)<block_end><else_stmt><block_start>product_code=channel.replace('lightning_board_' '')<block_end><if_stmt>product_code<in>self._snapshots<block_start>self.board._onmessage(product_code message)<block_end><block_end><elif_stmt>channel.startswith('lightning_ticker_')<block_start>self.ticker._onmessage(message)<block_end><elif_stmt>channel.startswith('lightning_executions_')<block_start>product_code=channel.replace('lightning_executions_' '')<line_sep>self.executions._onmessage(product_code message)<block_end><elif_stmt>channel<eq>'child_order_events'<block_start>self.childorderevents._onmessage(message)<line_sep>self.childorders._onmessage(message)<line_sep>self.positions._onmessage(message)<block_end><elif_stmt>channel<eq>'parent_order_events'<block_start>self.parentorderevents._onmessage(message)<line_sep>self.parentorders._onmessage(message)<block_end><block_end><block_end>@property<def_stmt>board self<arrow>'Board'<block_start><return>self.get('board' Board)<block_end>@property<def_stmt>ticker self<arrow>'Ticker'<block_start><return>self.get('ticker' Ticker)<block_end>@property<def_stmt>executions self<arrow>'Executions'<block_start><return>self.get('executions' Executions)<block_end>@property<def_stmt>childorderevents self<arrow>'ChildOrderEvents'<block_start><return>self.get('childorderevents' 
ChildOrderEvents)<block_end>@property<def_stmt>childorders self<arrow>'ChildOrders'<block_start><return>self.get('childorders' ChildOrders)<block_end>@property<def_stmt>parentorderevents self<arrow>'ParentOrderEvents'<block_start><return>self.get('parentorderevents' ParentOrderEvents)<block_end>@property<def_stmt>parentorders self<arrow>'ParentOrders'<block_start><return>self.get('parentorders' ParentOrders)<block_end>@property<def_stmt>positions self<arrow>'Positions'<block_start><return>self.get('positions' Positions)<block_end><block_end><class_stmt>Board(DataStore)<block_start>_KEYS=['product_code' 'side' 'price']<def_stmt>_init self<arrow><none><block_start>self.mid_price:dict[str float]={}<block_end><def_stmt>sorted self query:Item=<none><arrow>dict[str list[Item]]<block_start><if_stmt>query<is><none><block_start>query={}<block_end>result={'SELL':[] 'BUY':[]}<for_stmt>item self<block_start><if_stmt>all(k<in>item<and>query[k]<eq>item[k]<for>k query)<block_start>result[item['side']].append(item)<block_end><block_end>result['SELL'].sort(key=<lambda>x:x['price'])<line_sep>result['BUY'].sort(key=<lambda>x:x['price'] reverse=<true>)<line_sep><return>result<block_end><def_stmt>_onmessage self product_code:str message:Item<arrow><none><block_start>self.mid_price[product_code]=message['mid_price']<for_stmt>key,side (('bids' 'BUY') ('asks' 'SELL'))<block_start><for_stmt>item message[key]<block_start><if_stmt>item['size']<block_start>self._insert([{'product_code':product_code 'side':side **item}])<block_end><else_stmt><block_start>self._delete([{'product_code':product_code 'side':side **item}])<block_end><block_end><block_end>board=self.sorted({'product_code':product_code})<line_sep>targets=[]<for_stmt>side,ope (('BUY' operator.le) ('SELL' operator.gt))<block_start><for_stmt>item board[side]<block_start><if_stmt>ope(item['price'] message['mid_price'])<block_start><break><block_end><else_stmt><block_start>targets.append(item)<block_end><block_end><block_end>self._delete(targets)<block_end><block_end><class_stmt>Ticker(DataStore)<block_start>_KEYS=['product_code']<def_stmt>_onmessage self message:Item<arrow><none><block_start>self._update([message])<block_end><block_end><class_stmt>Executions(DataStore)<block_start>_MAXLEN=99999<def_stmt>_onmessage self product_code:str message:list[Item]<arrow><none><block_start><for_stmt>item message<block_start>self._insert([{'product_code':product_code **item}])<block_end><block_end><block_end><class_stmt>ChildOrderEvents(DataStore)<block_start><def_stmt>_onmessage self message:list[Item]<arrow><none><block_start>self._insert(message)<block_end><block_end><class_stmt>ParentOrderEvents(DataStore)<block_start><def_stmt>_onmessage self message:list[Item]<arrow><none><block_start>self._insert(message)<block_end><block_end><class_stmt>ChildOrders(DataStore)<block_start>_KEYS=['child_order_acceptance_id']<def_stmt>_onresponse self data:list[Item]<arrow><none><block_start><if_stmt>data<block_start>self._delete(self.find({'product_code':data[0]['product_code']}))<for_stmt>item data<block_start><if_stmt>item['child_order_state']<eq>'ACTIVE'<block_start>self._insert([item])<block_end><block_end><block_end><block_end><def_stmt>_onmessage self message:list[Item]<arrow><none><block_start><for_stmt>item message<block_start><if_stmt>item['event_type']<eq>'ORDER'<block_start>self._insert([item])<block_end><elif_stmt>item['event_type']<in>('CANCEL' 
'EXPIRE')<block_start>self._delete([item])<block_end><elif_stmt>item['event_type']<eq>'EXECUTION'<block_start><if_stmt>item['outstanding_size']<block_start>childorder=self.get(item)<if_stmt>childorder<block_start><if_stmt>isinstance(childorder['size'] int)<and>isinstance(item['size'] int)<block_start>childorder['size']<augsub>item['size']<block_end><else_stmt><block_start>childorder['size']=float(Decimal(str(childorder['size']))-Decimal(str(item['size'])))<block_end><block_end><block_end><else_stmt><block_start>self._delete([item])<block_end><block_end><block_end><block_end><block_end><class_stmt>ParentOrders(DataStore)<block_start>_KEYS=['parent_order_acceptance_id']<def_stmt>_onresponse self data:list[Item]<arrow><none><block_start><if_stmt>data<block_start>self._delete(self.find({'product_code':data[0]['product_code']}))<for_stmt>item data<block_start><if_stmt>item['parent_order_state']<eq>'ACTIVE'<block_start>self._insert([item])<block_end><block_end><block_end><block_end><def_stmt>_onmessage self message:list[Item]<arrow><none><block_start><for_stmt>item message<block_start><if_stmt>item['event_type']<eq>'ORDER'<block_start>self._insert([item])<block_end><elif_stmt>item['event_type']<in>('CANCEL' 'EXPIRE')<block_start>self._delete([item])<block_end><elif_stmt>item['event_type']<eq>'COMPLETE'<block_start>parentorder=self.get(item)<if_stmt>parentorder<block_start><if_stmt>parentorder['parent_order_type']<in>('IFD' 'IFDOCO')<block_start><if_stmt>item['parameter_index']<ge>2<block_start>self._delete([item])<block_end><block_end><else_stmt><block_start>self._delete([item])<block_end><block_end><block_end><block_end><block_end><block_end><class_stmt>Positions(DataStore)<block_start>_COMMON_KEYS=['product_code' 'side' 'price' 'size' 'commission' 'sfd' ]<def_stmt>_common_keys self item:Item<arrow>Item<block_start><return>{key:item[key]<for>key self._COMMON_KEYS}<block_end><def_stmt>_onresponse self data:list[Item]<arrow><none><block_start><if_stmt>data<block_start>self._delete(self.find({'product_code':data[0]['product_code']}))<for_stmt>item data<block_start>self._insert([self._common_keys(item)])<block_end><block_end><block_end><def_stmt>_onmessage self message:list[Item]<arrow><none><block_start><for_stmt>item message<block_start><if_stmt>item['event_type']<eq>'EXECUTION'<block_start>positions=self._find_with_uuid({'product_code':item['product_code']})<if_stmt>positions<block_start><if_stmt>positions[next(iter(positions))]['side']<eq>item['side']<block_start>self._insert([self._common_keys(item)])<block_end><else_stmt><block_start><for_stmt>uid,pos positions.items()<block_start><if_stmt>pos['size']<g>item['size']<block_start><if_stmt>isinstance(pos['size'] int)<and>isinstance(item['size'] int)<block_start>pos['size']<augsub>item['size']<block_end><else_stmt><block_start>pos['size']=float(Decimal(str(pos['size']))-Decimal(str(item['size'])))<block_end><break><block_end><else_stmt><block_start><if_stmt>isinstance(pos['size'] int)<and>isinstance(item['size'] int)<block_start>item['size']<augsub>pos['size']<block_end><else_stmt><block_start>item['size']=float(Decimal(str(item['size']))-Decimal(str(pos['size'])))<block_end>self._remove([uid])<if_stmt><not>pos['size']<block_start><break><block_end><block_end><block_end><block_end><block_end><else_stmt><block_start><try_stmt><block_start>self._insert([self._common_keys(item)])<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><block_end><block_end><block_end><block_end>
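# Added connection sketch, not part of the original module and only a hedged illustration: the Client/ws_connect signature, the public onmessage handler and the websocket endpoint are assumed from the wider pybotters API; only find() is taken from the store defined above. <if_stmt>__name__<eq>"__main__"<block_start><import_stmt>pybotters<line_sep><async_keyword><def_stmt>main <block_start><async_keyword><with_stmt>pybotters.Client()<as>client<block_start>store=bitFlyerDataStore()<line_sep><await>client.ws_connect('wss://ws.lightstream.bitflyer.com/json-rpc' send_json=[{'method':'subscribe' 'params':{'channel':'lightning_ticker_FX_BTC_JPY'} 'id':1}] hdlr_json=store.onmessage)<line_sep><while_stmt><true><block_start>print(store.ticker.find())<line_sep><await>asyncio.sleep(1)<block_end><block_end><block_end>asyncio.run(main())<block_end>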
VBA=r""" Function ExecuteCmdSync(targetPath As String) 'Run a shell command, returning the output as a string' ' Using a hidden window, pipe the output of the command to the CLIP.EXE utility... ' Necessary because normal usage with oShell.Exec("cmd.exe /C " & sCmd) always pops a windows Dim instruction As String instruction = "cmd.exe /c " & targetPath & " | clip" On Error Resume Next Err.Clear CreateObject("WScript.Shell").Run instruction, 0, True On Error Goto 0 ' Read the clipboard text using htmlfile object ExecuteCmdSync = CreateObject("htmlfile").ParentWindow.ClipboardData.GetData("text") End Function """<line_sep>
<import_stmt>tensorflow<as>tf<import_from_stmt>tf_siren siren<class_stmt>SIRENModel(tf.keras.Model)<block_start><def_stmt>__init__ self units:int final_units:int final_activation:str="linear" num_layers:int=1 w0:float=30.0 w0_initial:float=30.0 initial_layer_init:str='siren_first_uniform' use_bias:bool=<true> **kwargs<block_start>""" SIREN model from the paper [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661). Used to create a multi-layer MLP using SinusodialRepresentationDense layers. Args: units: Number of hidden units in the intermediate layers. final_units: Number of hidden units in the final layer. final_activation: Activation function of the final layer. num_layers: Number of layers in the network. w0: w0 in the activation step `act(x; w0) = sin(w0 * x)`. w0_initial: By default, scales `w0` of first layer to 30 (as used in the paper). initial_layer_init: Initialization for the first SIREN layer. Can be any valid keras initialization object or string. For SIREN, use `siren_uniform` for the general initialization, or `siren_first_uniform` which is specific for first layer. use_bias: Boolean whether to use bias or not. # References: - [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661) """<line_sep>super(SIRENModel self).__init__(**kwargs)<line_sep>siren_layers=[siren.SinusodialRepresentationDense(units w0=w0_initial use_bias=use_bias kernel_initializer=initial_layer_init **kwargs)]<for_stmt>_ range(num_layers-1)<block_start>siren_layers.append(siren.SinusodialRepresentationDense(units w0=w0 use_bias=use_bias **kwargs))<block_end>self.siren_layers=tf.keras.Sequential(siren_layers)<line_sep>self.final_dense=siren.SinusodialRepresentationDense(final_units activation=final_activation use_bias=use_bias **kwargs)<block_end><def_stmt>call self inputs training=<none> mask=<none><block_start>features=self.siren_layers(inputs)<line_sep>output=self.final_dense(features)<line_sep><return>output<block_end><block_end><class_stmt>ScaledSIRENModel(tf.keras.Model)<block_start><def_stmt>__init__ self units:int final_units:int final_activation:str="linear" num_layers:int=1 w0:float=30.0 w0_initial:float=30.0 scale:float=1.0 scale_initial:float=<none> initial_layer_init:str='siren_first_uniform' use_bias:bool=<true> **kwargs<block_start>""" Scaled SIREN model from the paper [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661). Used to create a multi-layer MLP using ScaledSinusodialRepresentationDense layers. Args: units: Number of hidden units in the intermediate layers. final_units: Number of hidden units in the final layer. final_activation: Activation function of the final layer. num_layers: Number of layers in the network. w0: w0 in the activation step `act(x; w0) = sin(w0 * x)`. w0_initial: By default, scales `w0` of first layer to 30 (as used in the paper). scale: Scale of the kernel matrix prior to matmul. scale_initial: Scale of the kernel matrix prior to matmul, for the first layer. By default, uses the `w0_initial` value if not passed a value. initial_layer_init: Initialization for the first SIREN layer. Can be any valid keras initialization object or string. For SIREN, use `siren_uniform` for the general initialization, or `siren_first_uniform` which is specific for first layer. use_bias: Boolean whether to use bias or not. 
# References: - [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661) """<line_sep>super(ScaledSIRENModel self).__init__(**kwargs)<if_stmt>scale_initial<is><none><block_start>scale_initial=w0_initial<block_end>siren_layers=[siren.ScaledSinusodialRepresentationDense(units scale=scale_initial w0=w0_initial use_bias=use_bias kernel_initializer=initial_layer_init **kwargs)]<for_stmt>_ range(num_layers-1)<block_start>siren_layers.append(siren.ScaledSinusodialRepresentationDense(units scale=scale w0=w0 use_bias=use_bias **kwargs))<block_end>self.siren_layers=tf.keras.Sequential(siren_layers)<line_sep>self.final_dense=siren.ScaledSinusodialRepresentationDense(final_units scale=scale activation=final_activation use_bias=use_bias **kwargs)<block_end><def_stmt>call self inputs training=<none> mask=<none><block_start>features=self.siren_layers(inputs)<line_sep>output=self.final_dense(features)<line_sep><return>output<block_end><block_end>
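# Added usage sketch, not part of the original module: builds the SIRENModel defined above on a batch of random 2-D coordinates; the layer sizes and final activation are arbitrary illustrative choices. <if_stmt>__name__<eq>"__main__"<block_start>model=SIRENModel(units=256 final_units=3 final_activation="sigmoid" num_layers=5 w0=1.0 w0_initial=30.0)<line_sep>coords=tf.random.uniform([1024 2] minval=-1.0 maxval=1.0)<line_sep>rgb=model(coords)<line_sep>print(rgb.shape)# (1024, 3) <block_end>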
''' ASAC (Active Sensing using Actor-Critic Model) (12/18/2018) Prediction Function only with Selected Samples '''<line_sep>#%% Necessary packages <import_stmt>tensorflow<as>tf<line_sep>#%% Prediction Function ''' Inputs: - trainX, trainY (training set) - testX: testing features - trainG: mask vector for selected training samples - testG: mask vector for selected testing samples Outputs: - Prediction results on testing set '''<def_stmt>Predictor_G trainX testX trainY trainG testG iterations=5001# Initialization on the Graph <block_start>tf.reset_default_graph()<line_sep>#%% Preprocessing Train_No=len(trainY)<line_sep>Test_No=len(testX)<line_sep>New_trainX=list()<for_stmt>i range(Train_No)<block_start>Temp=trainX[i]<line_sep>Temp=Temp<times>trainG[i]<line_sep>New_trainX.append(Temp)<block_end>New_testX=list()<for_stmt>i range(Test_No)<block_start>Temp=testX[i]<line_sep>Temp=Temp<times>testG[i]<line_sep>New_testX.append(Temp)<block_end>#%% Network Parameters seq_length=len(New_trainX[0][: 0])<line_sep>data_dim=len(New_trainX[0][0 :])<line_sep>hidden_dim=5<line_sep>output_dim=1<line_sep>learning_rate=0.01<line_sep>#%% Network Build # input place holders X=tf.placeholder(tf.float32 [<none> seq_length data_dim])<line_sep>Y=tf.placeholder(tf.float32 [<none> seq_length])<line_sep># build a LSTM network cell=tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim state_is_tuple=<true> activation=tf.tanh)<line_sep>outputs,_states=tf.nn.dynamic_rnn(cell X dtype=tf.float32)<line_sep>Y_pred=tf.contrib.layers.fully_connected(outputs output_dim activation_fn=<none>)# We use the last cell's output # cost/loss loss=tf.reduce_sum(tf.square(tf.reshape(Y_pred [-1 seq_length])-Y))# sum of the squares # optimizer optimizer=tf.train.AdamOptimizer(learning_rate)<line_sep>train=optimizer.minimize(loss)<line_sep>#%% Sessions sess=tf.Session()<line_sep># Initialization sess.run(tf.global_variables_initializer())<line_sep>#%% Training step <for_stmt>i range(iterations)<block_start>_,step_loss=sess.run([train loss] feed_dict={X:New_trainX Y:trainY})<if_stmt>i%100<eq>0<block_start>print("[step: {}] loss: {}".format(i step_loss))<block_end><block_end># Test step test_predict=sess.run(Y_pred feed_dict={X:New_testX})<line_sep>#%% Output Final=list()<for_stmt>i range(len(testX))<block_start>Final.append(test_predict[i : 0])<block_end><return>Final<block_end>
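# Added smoke-test sketch, not from the original file: purely synthetic data with shapes inferred from the placeholders above; running it requires TensorFlow 1.x because of tf.contrib. <if_stmt>__name__<eq>"__main__"<block_start><import_stmt>numpy<as>np<line_sep>trainX=list(np.random.randn(32 10 3))<line_sep>trainG=list((np.random.rand(32 10 3)<g>0.5).astype(float))<line_sep>testX=list(np.random.randn(8 10 3))<line_sep>testG=list((np.random.rand(8 10 3)<g>0.5).astype(float))<line_sep>trainY=np.random.randn(32 10)<line_sep>predictions=Predictor_G(trainX testX trainY trainG testG iterations=101)<line_sep>print(len(predictions) predictions[0].shape)<block_end>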
# # Copyright (C) 2020 GreenWaves Technologies # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>gsystree<as>st<class_stmt>L1_interleaver(st.Component)<block_start><def_stmt>__init__ self parent slave nb_slaves=0 nb_masters=0 stage_bits=0 interleaving_bits=2<block_start>super(L1_interleaver self).__init__(parent slave)<line_sep>self.add_properties({'vp_component':'pulp.cluster.l1_interleaver_impl' 'nb_slaves':nb_slaves 'nb_masters':nb_masters 'stage_bits':stage_bits 'interleaving_bits':interleaving_bits})<block_end><block_end>
# tests/test_provider_hashicorp_time.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:28:43 UTC) <def_stmt>test_provider_import <block_start><import_stmt>terrascript.provider.hashicorp.time<block_end><def_stmt>test_resource_import <block_start><import_from_stmt>terrascript.resource.hashicorp.time time_offset<import_from_stmt>terrascript.resource.hashicorp.time time_rotating<import_from_stmt>terrascript.resource.hashicorp.time time_sleep<import_from_stmt>terrascript.resource.hashicorp.time time_static<block_end># TODO: Shortcut imports without namespace for official and supported providers. # TODO: This has to be moved into a required_providers block. # def test_version_source(): # # import terrascript.provider.hashicorp.time # # t = terrascript.provider.hashicorp.time.time() # s = str(t) # # assert 'https://github.com/hashicorp/terraform-provider-time' in s # assert '0.7.2' in s
""" analyze.py Get counts of blocks, entities, and tile entities in a selection. """<import_from_future_stmt> absolute_import<import_stmt>logging<import_stmt>numpy<import_from_stmt>collections defaultdict<import_from_stmt>mceditlib.operations Operation<line_sep>log=logging.getLogger(__name__)<class_stmt>AnalyzeOperation(Operation)<block_start><def_stmt>__init__ self dimension selection<block_start>""" Analyze all blocks in a selection and return counts of block types, entity IDs and tile entity IDs. Counts are returned in `self.blocks`, `self.entityCounts` and `self.tileEntityCounts` :type dimension: WorldEditorDimension :type selection: `~.BoundingBox` """<line_sep>super(AnalyzeOperation self).__init__(dimension selection)<line_sep>self.createSections=<false><line_sep>self.blocks=numpy.zeros(65536 dtype='intp')<line_sep>self.selection=selection<line_sep>self.entityCounts=defaultdict(int)<line_sep>self.tileEntityCounts=defaultdict(int)<line_sep>self.skipped=0<line_sep>self.sections=0<line_sep>log.info("Analyzing %s blocks" selection.volume)<block_end><def_stmt>done self<block_start>log.info(u"Analyze: Skipped {0}/{1} sections".format(self.skipped self.sections))<block_end><def_stmt>operateOnChunk self chunk<block_start>cx,cz=chunk.cx chunk.cz<for_stmt>cy chunk.bounds.sectionPositions(cx cz)<block_start>section=chunk.getSection(cy create=<false>)<if_stmt>section<is><none><block_start><continue><block_end>self.sections<augadd>1<line_sep>sectionMask=self.selection.section_mask(cx cy cz)<if_stmt>sectionMask<is><none><block_start>self.skipped<augadd>1<line_sep><continue><block_end>maskSize=sectionMask.sum()<if_stmt>maskSize<eq>0<block_start>self.skipped<augadd>1<line_sep><continue><block_end>blocks=numpy.array(section.Blocks[sectionMask] dtype='uint16')<line_sep>blocks<augor>(numpy.array(section.Data[sectionMask] dtype='uint16')<lshift>12)<line_sep>b=numpy.bincount(blocks.ravel())<line_sep>self.blocks[:b.shape[0]]<augadd>b<block_end><for_stmt>ref chunk.Entities<block_start><if_stmt>ref.Position<in>self.selection<block_start>self.entityCounts[ref.id]<augadd>1<block_end><block_end><for_stmt>ref chunk.TileEntities<block_start><if_stmt>ref.Position<in>self.selection<block_start>self.tileEntityCounts[ref.id]<augadd>1<block_end><block_end><block_end><block_end>
<import_from_future_stmt> division<import_stmt>logging<import_stmt>pyvips<import_from_stmt>pyvips ffi vips_lib<line_sep>logger=logging.getLogger(__name__)<class_stmt>TargetCustom(pyvips.Target)<block_start>"""An output target you can connect action signals to to implement behaviour. """<def_stmt>__init__ self<block_start>"""Make a new target you can customise. You can pass this target to (for example) :meth:`write_to_target`. """<line_sep>target=ffi.cast('VipsTarget*' vips_lib.vips_target_custom_new())<line_sep>super(TargetCustom self).__init__(target)<block_end><def_stmt>on_write self handler<block_start>"""Attach a write handler. The interface is exactly as io.write(). The handler is given a bytes-like object to write, and should return the number of bytes written. """<def_stmt>interface_handler buf<block_start>bytes_written=handler(buf)<line_sep># py2 will often return None for bytes_written ... replace with # the length of the string <if_stmt>bytes_written<is><none><block_start>bytes_written=len(buf)<block_end><return>bytes_written<block_end>self.signal_connect("write" interface_handler)<block_end><def_stmt>on_finish self handler<block_start>"""Attach a finish handler. This optional handler is called at the end of write. It should do any cleaning up necessary. """<line_sep>self.signal_connect("finish" handler)<block_end><block_end>__all__=['TargetCustom']<line_sep>
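# Added usage sketch, not in the original module: collects PNG bytes in memory through on_write; Image.black and write_to_target are assumed from the wider pyvips API. <if_stmt>__name__<eq>"__main__"<block_start>target=TargetCustom()<line_sep>chunks=[]<line_sep>target.on_write(<lambda>buf:chunks.append(bytes(buf))<or>len(buf))<line_sep>image=pyvips.Image.black(64 64)<line_sep>image.write_to_target(target ".png")<line_sep>print(len(b"".join(chunks)) "bytes written")<block_end>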
<import_stmt>unittest<import_from_stmt>unittest.mock patch<import_stmt>programytest.storage.engines<as>Engines<import_from_stmt>programy.storage.stores.nosql.mongo.config MongoStorageConfiguration<import_from_stmt>programy.storage.stores.nosql.mongo.engine MongoStorageEngine<import_from_stmt>programy.storage.stores.nosql.mongo.store.oobs MongoOOBStore<import_from_stmt>programytest.storage.asserts.store.assert_oobs OOBsStoreAsserts<class_stmt>MongoOOBStoreTests(OOBsStoreAsserts)<block_start>@unittest.skipIf(Engines.mongo<is><false> Engines.mongo_disabled)<def_stmt>test_initialise self<block_start>config=MongoStorageConfiguration()<line_sep>engine=MongoStorageEngine(config)<line_sep>engine.initialise()<line_sep>store=MongoOOBStore(engine)<line_sep>self.assertEqual(store.storage_engine engine)<block_end>@unittest.skipIf(Engines.mongo<is><false> Engines.mongo_disabled)<def_stmt>test_load_oobs self<block_start>config=MongoStorageConfiguration()<line_sep>engine=MongoStorageEngine(config)<line_sep>engine.initialise()<line_sep>store=MongoOOBStore(engine)<line_sep>self.assert_load(store)<block_end>@staticmethod<def_stmt>patch_instantiate_class class_string<block_start><raise>Exception("Mock Exception")<block_end>@unittest.skipIf(Engines.mongo<is><false> Engines.mongo_disabled)@patch("programy.utils.classes.loader.ClassLoader.instantiate_class" patch_instantiate_class)<def_stmt>test_load_oobs_exception self<block_start>config=MongoStorageConfiguration()<line_sep>engine=MongoStorageEngine(config)<line_sep>engine.initialise()<line_sep>store=MongoOOBStore(engine)<line_sep>self.assert_load_exception(store)<block_end>@unittest.skipIf(Engines.mongo<is><false> Engines.mongo_disabled)<def_stmt>test_upload_from_file self<block_start>config=MongoStorageConfiguration()<line_sep>engine=MongoStorageEngine(config)<line_sep>engine.initialise()<line_sep>store=MongoOOBStore(engine)<line_sep>self.assert_upload_from_file(store verbose=<false>)<block_end>@unittest.skipIf(Engines.mongo<is><false> Engines.mongo_disabled)<def_stmt>test_upload_from_file_verbose self<block_start>config=MongoStorageConfiguration()<line_sep>engine=MongoStorageEngine(config)<line_sep>engine.initialise()<line_sep>store=MongoOOBStore(engine)<line_sep>self.assert_upload_from_file(store verbose=<true>)<block_end><def_stmt>patch_load_oobs_from_file self filename verbose<block_start><raise>Exception("Mock Exception")<block_end>@unittest.skipIf(Engines.mongo<is><false> Engines.mongo_disabled)@patch("programy.storage.stores.nosql.mongo.store.oobs.MongoOOBStore._load_oobs_from_file" patch_load_oobs_from_file)<def_stmt>test_upload_from_file_exception self<block_start>config=MongoStorageConfiguration()<line_sep>engine=MongoStorageEngine(config)<line_sep>engine.initialise()<line_sep>store=MongoOOBStore(engine)<line_sep>self.assert_upload_from_file_exception(store)<block_end><block_end>
""" Models for the help system. The database-tied help system is only half of Evennia's help functionality, the other one being the auto-generated command help that is created on the fly from each command's `__doc__` string. The persistent database system defined here is intended for all other forms of help that do not concern commands, like information about the game world, policy info, rules and similar. """<import_from_stmt>django.contrib.contenttypes.models ContentType<import_from_stmt>django.db models<import_from_stmt>django.urls reverse<import_from_stmt>django.utils.text slugify<import_from_stmt>evennia.utils.idmapper.models SharedMemoryModel<import_from_stmt>evennia.help.manager HelpEntryManager<import_from_stmt>evennia.typeclasses.models Tag TagHandler AliasHandler<import_from_stmt>evennia.locks.lockhandler LockHandler<import_from_stmt>evennia.utils.utils lazy_property<line_sep>__all__=("HelpEntry" )<line_sep># ------------------------------------------------------------ # # HelpEntry # # ------------------------------------------------------------ <class_stmt>HelpEntry(SharedMemoryModel)<block_start>""" A generic help entry. An HelpEntry object has the following properties defined: key - main name of entry help_category - which category entry belongs to (defaults to General) entrytext - the actual help text permissions - perm strings Method: access """<line_sep># # HelpEntry Database Model setup # # # These database fields are all set using their corresponding properties, # named same as the field, but withtout the db_* prefix. # title of the help entry db_key=models.CharField("help key" max_length=255 unique=<true> help_text="key to search for")<line_sep># help category db_help_category=models.CharField("help category" max_length=255 default="General" help_text="organizes help entries in lists" )<line_sep># the actual help entry text, in any formatting. db_entrytext=models.TextField("help entry" blank=<true> help_text="the main body of help text")<line_sep># lock string storage db_lock_storage=models.TextField("locks" blank=<true> help_text="normally view:all().")<line_sep># tags are primarily used for permissions db_tags=models.ManyToManyField(Tag blank=<true> help_text="tags on this object. Tags are simple string markers to identify, group and alias objects." )<line_sep># (deprecated, only here to allow MUX helpfile load (don't use otherwise)). # TODO: remove this when not needed anymore. db_staff_only=models.BooleanField(default=<false>)<line_sep># Database manager objects=HelpEntryManager()<line_sep>_is_deleted=<false><line_sep># lazy-loaded handlers @lazy_property<def_stmt>locks self<block_start><return>LockHandler(self)<block_end>@lazy_property<def_stmt>tags self<block_start><return>TagHandler(self)<block_end>@lazy_property<def_stmt>aliases self<block_start><return>AliasHandler(self)<block_end><class_stmt>Meta(object)<block_start>"Define Django meta options"<line_sep>verbose_name="Help Entry"<line_sep>verbose_name_plural="Help Entries"<block_end># # # HelpEntry main class methods # # <def_stmt>__str__ self<block_start><return>self.key<block_end><def_stmt>__repr__ self<block_start><return>"%s"%self.key<block_end><def_stmt>access self accessing_obj access_type="read" default=<false><block_start>""" Determines if another object has permission to access. 
accessing_obj - object trying to access this one access_type - type of access sought default - what to return if no lock of access_type was found """<line_sep><return>self.locks.check(accessing_obj access_type=access_type default=default)<block_end># # Web/Django methods # <def_stmt>web_get_admin_url self<block_start>""" Returns the URI path for the Django Admin page for this object. ex. Account#1 = '/admin/accounts/accountdb/1/change/' Returns: path (str): URI path to Django Admin page for object. """<line_sep>content_type=ContentType.objects.get_for_model(self.__class__)<line_sep><return>reverse("admin:%s_%s_change"%(content_type.app_label content_type.model) args=(self.id ))<block_end>@classmethod<def_stmt>web_get_create_url cls<block_start>""" Returns the URI path for a View that allows users to create new instances of this object. ex. Chargen = '/characters/create/' For this to work, the developer must have defined a named view somewhere in urls.py that follows the format 'modelname-action', so in this case a named view of 'character-create' would be referenced by this method. ex. url(r'characters/create/', ChargenView.as_view(), name='character-create') If no View has been created and defined in urls.py, returns an HTML anchor. This method is naive and simply returns a path. Securing access to the actual view and limiting who can create new objects is the developer's responsibility. Returns: path (str): URI path to object creation page, if defined. """<try_stmt><block_start><return>reverse("%s-create"%slugify(cls._meta.verbose_name))<block_end><except_stmt><block_start><return>"#"<block_end><block_end><def_stmt>web_get_detail_url self<block_start>""" Returns the URI path for a View that allows users to view details for this object. ex. Oscar (Character) = '/characters/oscar/1/' For this to work, the developer must have defined a named view somewhere in urls.py that follows the format 'modelname-action', so in this case a named view of 'character-detail' would be referenced by this method. ex. url(r'characters/(?P<slug>[\w\d\-]+)/(?P<pk>[0-9]+)/$', CharDetailView.as_view(), name='character-detail') If no View has been created and defined in urls.py, returns an HTML anchor. This method is naive and simply returns a path. Securing access to the actual view and limiting who can view this object is the developer's responsibility. Returns: path (str): URI path to object detail page, if defined. """<try_stmt><block_start><return>reverse("%s-detail"%slugify(self._meta.verbose_name) kwargs={"category":slugify(self.db_help_category) "topic":slugify(self.db_key)} )<block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep><return>"#"<block_end><block_end><def_stmt>web_get_update_url self<block_start>""" Returns the URI path for a View that allows users to update this object. ex. Oscar (Character) = '/characters/oscar/1/change/' For this to work, the developer must have defined a named view somewhere in urls.py that follows the format 'modelname-action', so in this case a named view of 'character-update' would be referenced by this method. ex. url(r'characters/(?P<slug>[\w\d\-]+)/(?P<pk>[0-9]+)/change/$', CharUpdateView.as_view(), name='character-update') If no View has been created and defined in urls.py, returns an HTML anchor. This method is naive and simply returns a path. Securing access to the actual view and limiting who can modify objects is the developer's responsibility. Returns: path (str): URI path to object update page, if defined. 
"""<try_stmt><block_start><return>reverse("%s-update"%slugify(self._meta.verbose_name) kwargs={"category":slugify(self.db_help_category) "topic":slugify(self.db_key)} )<block_end><except_stmt><block_start><return>"#"<block_end><block_end><def_stmt>web_get_delete_url self<block_start>""" Returns the URI path for a View that allows users to delete this object. ex. Oscar (Character) = '/characters/oscar/1/delete/' For this to work, the developer must have defined a named view somewhere in urls.py that follows the format 'modelname-action', so in this case a named view of 'character-detail' would be referenced by this method. ex. url(r'characters/(?P<slug>[\w\d\-]+)/(?P<pk>[0-9]+)/delete/$', CharDeleteView.as_view(), name='character-delete') If no View has been created and defined in urls.py, returns an HTML anchor. This method is naive and simply returns a path. Securing access to the actual view and limiting who can delete this object is the developer's responsibility. Returns: path (str): URI path to object deletion page, if defined. """<try_stmt><block_start><return>reverse("%s-delete"%slugify(self._meta.verbose_name) kwargs={"category":slugify(self.db_help_category) "topic":slugify(self.db_key)} )<block_end><except_stmt><block_start><return>"#"<block_end><block_end># Used by Django Sites/Admin get_absolute_url=web_get_detail_url<block_end>
<import_stmt>pytest<import_from_stmt>api.base.settings.defaults API_BASE<import_from_stmt>api_tests.requests.mixins PreprintRequestTestMixin<line_sep>@[email protected]_db<class_stmt>TestPreprintRequestActionList(PreprintRequestTestMixin)<block_start><def_stmt>url self request<block_start><return>'/{}requests/{}/actions/'.format(API_BASE request._id)<block_end><def_stmt>test_nonmod_nonadmin_nonrequester_cannot_view self app noncontrib write_contrib pre_request post_request none_request<block_start><for_stmt>request [pre_request post_request none_request]<block_start><for_stmt>user [noncontrib write_contrib]<block_start>res=app.get(self.url(request) auth=user.auth expect_errors=<true>)<assert_stmt>res.status_code<eq>403<block_end><block_end><block_end><def_stmt>test_mod_can_view self app moderator pre_request post_request auto_approved_pre_request<block_start><for_stmt>request [pre_request post_request]<block_start>res=app.get(self.url(request) auth=moderator.auth)<assert_stmt>res.status_code<eq>200<assert_stmt>len(res.json['data'])<eq>1<assert_stmt>res.json['data'][0]['attributes']['auto']<is><false><block_end>res=app.get(self.url(auto_approved_pre_request) auth=moderator.auth)<assert_stmt>res.status_code<eq>200<assert_stmt>len(res.json['data'])<eq>2<assert_stmt>res.json['data'][0]['attributes']['auto']<is><true><block_end><def_stmt>test_admin_can_view self app admin pre_request post_request none_request auto_approved_pre_request<block_start><for_stmt>request [pre_request post_request none_request]<block_start>res=app.get(self.url(request) auth=admin.auth)<assert_stmt>res.status_code<eq>200<assert_stmt>len(res.json['data'])<eq>1<assert_stmt>res.json['data'][0]['attributes']['auto']<is><false><block_end>res=app.get(self.url(auto_approved_pre_request) auth=admin.auth)<assert_stmt>res.status_code<eq>200<assert_stmt>len(res.json['data'])<eq>2<assert_stmt>res.json['data'][0]['attributes']['auto']<is><true><block_end><def_stmt>test_nonadmin_requester_can_view self app requester nonadmin_pre_request nonadmin_post_request nonadmin_none_request nonadmin_auto_approved_pre_request<block_start><for_stmt>request [nonadmin_pre_request nonadmin_post_request nonadmin_none_request]<block_start>res=app.get(self.url(request) auth=requester.auth)<assert_stmt>res.status_code<eq>200<assert_stmt>len(res.json['data'])<eq>1<assert_stmt>res.json['data'][0]['attributes']['auto']<is><false><block_end>res=app.get(self.url(nonadmin_auto_approved_pre_request) auth=requester.auth)<assert_stmt>res.status_code<eq>200<assert_stmt>len(res.json['data'])<eq>2<assert_stmt>res.json['data'][0]['attributes']['auto']<is><true><block_end><block_end>
<import_stmt>unittest<import_from_stmt>minos.common DatabaseLock Lock <import_from_stmt>minos.common.testing MockedDatabaseClient <class_stmt>TestDatabaseLock(unittest.IsolatedAsyncioTestCase)<block_start><def_stmt>test_base self<block_start>self.assertTrue(issubclass(DatabaseLock Lock))<block_end><async_keyword><def_stmt>test_client self<block_start>client=MockedDatabaseClient()<line_sep>lock=DatabaseLock(client "foo")<line_sep>self.assertEqual(client lock.client)<block_end><async_keyword><def_stmt>test_key self<block_start>client=MockedDatabaseClient()<line_sep>lock=DatabaseLock(client "foo")<line_sep>self.assertEqual("foo" lock.key)<block_end><async_keyword><def_stmt>test_key_raises self<block_start>client=MockedDatabaseClient()<with_stmt>self.assertRaises(ValueError)<block_start>DatabaseLock(client [])<block_end><block_end><async_keyword><def_stmt>test_hashed_key self<block_start>client=MockedDatabaseClient()<line_sep>lock=DatabaseLock(client "foo")<line_sep>self.assertEqual(hash("foo") lock.hashed_key)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
<import_stmt>sys<import_stmt>ring<import_from_stmt>ring.coder Registry Coder JsonCoder coderize registry<as>default_registry <import_stmt>pytest<def_stmt>test_coder_registry <block_start>registry=Registry()<line_sep>error_coder=<none> <none><line_sep>registry.register('_error' error_coder)<assert_stmt>registry.get('_error')<eq>(<none> <none>)<line_sep>tuple_coder=<lambda>x:x <lambda>x:x# noqa registry.register('tuple' tuple_coder)<class_stmt>NewStaticCoder(Coder)<block_start>@staticmethod<def_stmt>encode d<block_start><return>d<block_end>@staticmethod<def_stmt>decode d<block_start><return>d<block_end><block_end>registry.register('new_static' NewStaticCoder)<line_sep>registry.register('new_static_obj' NewStaticCoder())<class_stmt>NewCoder(Coder)<block_start><def_stmt>encode self x<block_start><return>x<block_end><def_stmt>decode self x<block_start><return>x<block_end><block_end>registry.register('new_obj' NewCoder())<block_end><def_stmt>test_coder_json <block_start>coder=default_registry.get('json')<assert_stmt>b'1'<eq>coder.encode(1)<assert_stmt>1<eq>coder.decode(b'1')<assert_stmt>b'{"x": 1}'<eq>coder.encode({'x':1})<assert_stmt>{'x':1}<eq>coder.decode(b'{"x": 1}')<block_end><def_stmt>test_coder_pickle <block_start><import_stmt>memcache<import_stmt>datetime<line_sep>coder=default_registry.get('pickle')<line_sep>mc=memcache.Client(['127.0.0.1:11211'])<line_sep>@ring.memcache(mc coder='pickle')<def_stmt>now <block_start><return>datetime.datetime.now()<block_end>now.delete()<line_sep>dt_now=now()<line_sep>direct_data=mc.get(now.key())<assert_stmt>direct_data<line_sep>encoded_data=coder.encode(dt_now)<assert_stmt>encoded_data<eq>direct_data<line_sep>decoded_data=coder.decode(encoded_data)<assert_stmt>decoded_data<eq>dt_now<block_end><def_stmt>test_ring_bare_coder <block_start>@ring.dict({} coder=JsonCoder)<def_stmt>f <block_start><return>10<block_end><assert_stmt>f()<eq>10<block_end><if_stmt>sys.version_info<ge>(3 7)<block_start><import_from_stmt>tests._test_module_py37 DataClass<def_stmt>test_dataclass_coder <block_start>coder=default_registry.get('dataclass')<line_sep>dataclass=DataClass('name' 1 {'test':1})<line_sep>encoded_dataclass=coder.encode(dataclass)<assert_stmt>b'["DataClass", {"name": "name", "my_int": 1, "my_dict": {"test": 1}}]'<eq>encoded_dataclass<line_sep>decoded_dataclass=coder.decode(encoded_dataclass)<assert_stmt>'DataClass'<eq>type(decoded_dataclass).__name__<assert_stmt>decoded_dataclass.name<eq>'name'<assert_stmt>decoded_dataclass.my_int<eq>1<assert_stmt>decoded_dataclass.my_dict<eq>{'test':1}<block_end><block_end><def_stmt>test_unexisting_coder <block_start>cache={}<with_stmt>pytest.raises(TypeError)<block_start>@ring.dict(cache coder='messed-up')<def_stmt>f <block_start><pass><block_end><block_end><block_end>@pytest.mark.parametrize('raw_coder' [JsonCoder ])<def_stmt>test_coderize raw_coder<block_start><assert_stmt>raw_coder<assert_stmt>isinstance(coderize(raw_coder) Coder)<block_end><def_stmt>test_invalid_coderize <block_start><with_stmt>pytest.raises(TypeError)<block_start>coderize(1)<block_end><block_end>
# Generated by Django 3.1.4 on 2020-12-31 21:27 <import_from_stmt>django.db migrations models<def_stmt>mark_giver_notification_as_sent_for_all_previous_orders apps schema_editor<block_start>apps.get_model('orders.Order').objects.filter(giver__isnull=<false>).update(notification_to_giver_is_sent=<true>)<block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('orders' '0010_OrderI18n') ]<line_sep>operations=[migrations.AddField(model_name='order' name='notification_to_giver_is_sent' field=models.BooleanField(default=<false>) ) migrations.RunPython(mark_giver_notification_as_sent_for_all_previous_orders) ]<block_end>
# # linter.py # Linter for SublimeLinter3, a code checking framework for Sublime Text 3 # # Written by <NAME> # Copyright (c) 2013 <NAME> # https://github.com/WMeldon/SublimeLinter-apiblueprint # # Modified by <NAME> # # License: MIT """This module exports the Apiblueprint plugin class."""<def_stmt>ApiBlueprintFactory <block_start>""" Define API Blueprint Linter Class"""<class_stmt>ApiBlueprint(Linter)<block_start>"""Provides an interface to apiblueprint."""<line_sep>syntax='apiblueprint'<line_sep>cmd='drafter --validate'<line_sep>executable='drafter'<line_sep>executable=<none><line_sep>regex=(r'(?:(?P<warning>warning)|(?P<error>error)):\s*\((?P<code>\d+)\)'<concat>r'(?P<message>.+?)(?::|$)'<concat>r'(?P<line>\d+):(?P<col>\d+)(?:.*)')<line_sep>multiline=<false><line_sep>line_col_base=(0 0)<line_sep>tempfile_suffix=<none><line_sep>error_stream=util.STREAM_BOTH<line_sep>selectors={}<line_sep>word_re=<none><line_sep>defaults={}<line_sep>inline_settings=<none><line_sep>inline_overrides=<none><line_sep>comment_re=<none><def_stmt>split_match self match<block_start>""" Run default match. If match is found, convert line variable to line number and adjust col. """<line_sep>match,line,col,error,warning,message,near=super().split_match(match)<if_stmt>line<is><not><none><block_start>line,col=self.view.rowcol((int(line)))<line_sep>line=int(line)-self.line_col_base[0]<block_end><return>match line col error warning message near<block_end><block_end><block_end><try_stmt><block_start>"""Attempt to import SublimeLinter3"""<import_from_stmt>SublimeLinter.lint Linter util<line_sep>ApiBlueprintFactory()<block_end><except_stmt>ImportError<block_start>print("No SublimeLinter3 installed - Install SublimeLinter3 to lint your API blueprints (ST3 Only)")<block_end>
<import_from_future_stmt> absolute_import division print_function unicode_literals <import_from_stmt>astroid MANAGER Class CallFunc<import_from_stmt>.models transform_model_class<import_from_stmt>.testing transform_test_response<import_from_stmt>.factories transform_factory_return<def_stmt>register linter<block_start>MANAGER.register_transform(Class transform_model_class)<line_sep>MANAGER.register_transform(Class transform_test_response)<line_sep>MANAGER.register_transform(CallFunc transform_factory_return)<block_end>
<import_stmt>unittest<import_from_stmt>unittest mock<import_stmt>cherry<class_stmt>ApiTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.model='foo'<line_sep>self.text='random string'<block_end>@mock.patch('cherry.api.Classify')<def_stmt>test_classify_api self mock_classify<block_start>cherry.classify(model=self.model text=self.text)<line_sep>mock_classify.assert_called_once_with(model=self.model text=self.text)<block_end>@mock.patch('cherry.api.Trainer')<def_stmt>test_train_api self mock_train<block_start>cherry.train(model=self.model)<line_sep>mock_train.assert_called_once_with(self.model categories=<none> clf=<none> clf_method='MNB' encoding='utf-8' language='English' preprocessing=<none> vectorizer=<none> vectorizer_method='Count' x_data=<none> y_data=<none>)<block_end>@mock.patch('cherry.api.Trainer')<def_stmt>test_api_call_model_clf_vectorizer self mock_trainer<block_start>cherry.train('foo' clf='clf' vectorizer='vectorizer')<line_sep>mock_trainer.assert_called_with('foo' preprocessing=<none> categories=<none> encoding='utf-8' clf='clf' clf_method='MNB' language='English' vectorizer='vectorizer' vectorizer_method='Count' x_data=<none> y_data=<none>)<block_end>@mock.patch('cherry.api.Performance')<def_stmt>test_performance_api self mock_performance<block_start>cherry.performance(model=self.model)<line_sep>mock_performance.assert_called_once_with(self.model categories=<none> clf=<none> clf_method='MNB' encoding='utf-8' language='English' n_splits=10 output='Stdout' preprocessing=<none> vectorizer=<none> vectorizer_method='Count' x_data=<none> y_data=<none>)<block_end>@mock.patch('cherry.api.Performance')<def_stmt>test_performance_api_model_clf_vectorizer self mock_performance<block_start>cherry.performance('foo' clf='clf' vectorizer='vectorizer')<line_sep>mock_performance.assert_called_with('foo' categories=<none> clf='clf' clf_method='MNB' encoding='utf-8' language='English' n_splits=10 output='Stdout' preprocessing=<none> vectorizer='vectorizer' vectorizer_method='Count' x_data=<none> y_data=<none>)<block_end>@mock.patch('cherry.api.Search')<def_stmt>test_api_call self mock_search<block_start>cherry.search('foo' {'foo':'bar'})<line_sep>mock_search.assert_called_with('foo' {'foo':'bar'} categories=<none> clf=<none> clf_method='MNB' cv=3 encoding='utf-8' language='English' method='RandomizedSearchCV' n_jobs=-1 preprocessing=<none> vectorizer=<none> vectorizer_method='Count' x_data=<none> y_data=<none>)<block_end>@mock.patch('cherry.api.Display')<def_stmt>test_display_api self mock_display<block_start>cherry.display(model=self.model)<line_sep>mock_display.assert_called_once_with(self.model categories=<none> clf=<none> clf_method='MNB' encoding='utf-8' language='English' preprocessing=<none> vectorizer=<none> vectorizer_method='Count' x_data=<none> y_data=<none>)<block_end><block_end>