content
stringlengths 0–1.55M
|
---|
<import_stmt>pytest<import_stmt>salt.renderers.tomlmod<import_stmt>salt.serializers.toml<line_sep>@pytest.mark.skipif(salt.serializers.toml.HAS_TOML<is><false> reason="The 'toml' library is missing")<def_stmt>test_toml_render_string <block_start>data="""[[user-sshkey."ssh_auth.present"]]
user = "username"
[[user-sshkey."ssh_auth.present"]]
config = "%h/.ssh/authorized_keys"
[[user-sshkey."ssh_auth.present"]]
names = [
"hereismykey",
"anotherkey"
]
"""<line_sep>expected_result={"user-sshkey":{"ssh_auth.present":[{"user":"username"} {"config":"%h/.ssh/authorized_keys"} {"names":["hereismykey" "anotherkey"]} ]}}<line_sep>result=salt.renderers.tomlmod.render(data)<assert_stmt>result<eq>expected_result<block_end>
|
<import_stmt>pendulum<import_from_stmt>tests TestCase<import_from_stmt>src.masonite.utils.time migration_timestamp parse_human_time cookie_expire_time <class_stmt>TestTimeUtils(TestCase)<block_start><def_stmt>tearDown self<block_start>super().tearDown()<line_sep>self.restoreTime()<block_end><def_stmt>test_parse_human_time_now self<block_start>ref_time=pendulum.datetime(2021 1 1)<line_sep>self.fakeTime(ref_time)<line_sep>instance=parse_human_time("now")<line_sep>self.assertEqual(ref_time instance)<block_end><def_stmt>test_parse_human_time_expired self<block_start>self.fakeTime(pendulum.datetime(2021 1 1))<line_sep>instance=parse_human_time("expired")<line_sep>self.assertEqual(pendulum.datetime(2001 1 1) instance)<block_end><def_stmt>test_parse_human_time self<block_start>self.fakeTime(pendulum.datetime(2021 1 1 12 0 0))<line_sep>self.assertEqual(pendulum.datetime(2021 1 1 12 0 2) parse_human_time("2 seconds"))<line_sep>self.assertEqual(pendulum.datetime(2021 1 1 12 2 0) parse_human_time("2 minutes"))<line_sep>self.assertEqual(pendulum.datetime(2021 1 1 14 0 0) parse_human_time("2 hour"))<line_sep>self.assertEqual(pendulum.datetime(2021 1 2 12 0 0) parse_human_time("1 day"))<line_sep>self.assertEqual(pendulum.datetime(2021 1 15 12 0 0) parse_human_time("2 weeks"))<line_sep>self.assertEqual(pendulum.datetime(2021 4 1 12 0 0) parse_human_time("3 months"))<line_sep>self.assertEqual(pendulum.datetime(2030 1 1 12 0 0) parse_human_time("9 years"))<line_sep>self.assertEqual(<none> parse_human_time("10 nanoseconds"))<block_end><def_stmt>test_cookie_expire_time self<block_start>self.fakeTime(pendulum.datetime(2021 1 21 7 28 0))<line_sep>expiration_time_str=cookie_expire_time("7 days")<line_sep>self.assertEqual(expiration_time_str "Thu, 28 Jan 2021 07:28:00")<block_end><def_stmt>test_migration_timestamp self<block_start>self.fakeTime(pendulum.datetime(2021 10 25 8 12 54))<line_sep>self.assertEqual(migration_timestamp() "2021_10_25_081254")<block_end><block_end>
|
<import_from_stmt>core.providers.aws.boto3 prepare_aws_client_with_given_cred<import_stmt>boto3<def_stmt>get_es_client aws_auth_cred<block_start>"""
Returns the client object for AWS Elasticsearch
Args:
aws_auth_cred (dict): Dict containing AWS credentials
Returns:
obj: AWS Elasticsearch Object
"""<line_sep><return>prepare_aws_client_with_given_cred("es" aws_auth_cred)<block_end><def_stmt>check_es_domain_exists domain_name aws_auth_cred<block_start>"""
Check whether the given ES Domain already exists in the AWS Account
Args:
domain_name (str): ES Domain name
aws_auth_cred (dict): Dict containing AWS credentials
Returns:
Boolean: True if the domain exists else False
"""<line_sep>client=get_es_client(aws_auth_cred)<try_stmt><block_start>response=client.describe_elasticsearch_domain(DomainName=domain_name)<line_sep><return><true><if>response['DomainStatus']<else><false><block_end><except_stmt><block_start><return><false><block_end><block_end>
|
# type: ignore
# pylint: skip-file
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<class_stmt>DoubleConvBlock(nn.Module)<block_start>"""(2D conv => BN => LeakyReLU) * 2"""<def_stmt>__init__ self in_ch out_ch k_size pad dil<block_start>super().__init__()<line_sep>self.block=nn.Sequential(nn.Conv2d(in_ch out_ch kernel_size=k_size padding=pad dilation=dil) nn.BatchNorm2d(out_ch) nn.LeakyReLU(inplace=<true>) nn.Conv2d(out_ch out_ch kernel_size=k_size padding=pad dilation=dil) nn.BatchNorm2d(out_ch) nn.LeakyReLU(inplace=<true>) )<block_end><def_stmt>forward self x<block_start>x=self.block(x)<line_sep><return>x<block_end><block_end><class_stmt>Double3DConvBlock(nn.Module)<block_start>"""(3D conv => BN => LeakyReLU) * 2"""<def_stmt>__init__ self in_ch out_ch k_size pad dil<block_start>super().__init__()<line_sep>self.block=nn.Sequential(nn.Conv3d(in_ch out_ch kernel_size=k_size padding=pad dilation=dil) nn.BatchNorm3d(out_ch) nn.LeakyReLU(inplace=<true>) nn.Conv3d(out_ch out_ch kernel_size=k_size padding=pad dilation=dil) nn.BatchNorm3d(out_ch) nn.LeakyReLU(inplace=<true>) )<block_end><def_stmt>forward self x<block_start>x=self.block(x)<line_sep><return>x<block_end><block_end><class_stmt>ConvBlock(nn.Module)<block_start>"""(2D conv => BN => LeakyReLU)"""<def_stmt>__init__ self in_ch out_ch k_size pad dil<block_start>super().__init__()<line_sep>self.block=nn.Sequential(nn.Conv2d(in_ch out_ch kernel_size=k_size padding=pad dilation=dil) nn.BatchNorm2d(out_ch) nn.LeakyReLU(inplace=<true>) )<block_end><def_stmt>forward self x<block_start>x=self.block(x)<line_sep><return>x<block_end><block_end><class_stmt>ASPPBlock(nn.Module)<block_start>"""Atrous Spatial Pyramid Pooling
Parallel conv blocks with different dilation rates
"""<def_stmt>__init__ self in_ch out_ch=256<block_start>super().__init__()<line_sep>self.global_avg_pool=nn.AvgPool2d((64 64))<line_sep>self.conv1_1x1=nn.Conv2d(in_ch out_ch kernel_size=1 padding=0 dilation=1)<line_sep>self.single_conv_block1_1x1=ConvBlock(in_ch out_ch k_size=1 pad=0 dil=1)<line_sep>self.single_conv_block1_3x3=ConvBlock(in_ch out_ch k_size=3 pad=6 dil=6)<line_sep>self.single_conv_block2_3x3=ConvBlock(in_ch out_ch k_size=3 pad=12 dil=12)<line_sep>self.single_conv_block3_3x3=ConvBlock(in_ch out_ch k_size=3 pad=18 dil=18)<block_end><def_stmt>forward self x<block_start>x1=F.interpolate(self.global_avg_pool(x) size=(64 64) align_corners=<false> mode="bilinear")<line_sep>x1=self.conv1_1x1(x1)<line_sep>x2=self.single_conv_block1_1x1(x)<line_sep>x3=self.single_conv_block1_3x3(x)<line_sep>x4=self.single_conv_block2_3x3(x)<line_sep>x5=self.single_conv_block3_3x3(x)<line_sep>x_cat=torch.cat((x2 x3 x4 x5 x1) 1)<line_sep><return>x_cat<block_end><block_end><class_stmt>EncodingBranch(nn.Module)<block_start>"""
Encoding branch for a single radar view
PARAMETERS
----------
signal_type: str
Type of radar view.
Supported: 'range_doppler', 'range_angle' and 'angle_doppler'
"""<def_stmt>__init__ self signal_type<block_start>super().__init__()<line_sep>self.signal_type=signal_type<line_sep>self.double_3dconv_block1=Double3DConvBlock(in_ch=1 out_ch=128 k_size=3 pad=(0 1 1) dil=1)<line_sep>self.doppler_max_pool=nn.MaxPool2d(2 stride=(2 1))<line_sep>self.max_pool=nn.MaxPool2d(2 stride=2)<line_sep>self.double_conv_block2=DoubleConvBlock(in_ch=128 out_ch=128 k_size=3 pad=1 dil=1)<line_sep>self.single_conv_block1_1x1=ConvBlock(in_ch=128 out_ch=128 k_size=1 pad=0 dil=1)<block_end><def_stmt>forward self x<block_start>x1=self.double_3dconv_block1(x)<line_sep>x1=torch.squeeze(x1 2)# remove temporal dimension
<if_stmt>self.signal_type<in>("range_doppler" "angle_doppler")# The Doppler dimension requires a specific processing
<block_start>x1_pad=F.pad(x1 (0 1 0 0) "constant" 0)<line_sep>x1_down=self.doppler_max_pool(x1_pad)<block_end><else_stmt><block_start>x1_down=self.max_pool(x1)<block_end>x2=self.double_conv_block2(x1_down)<if_stmt>self.signal_type<in>("range_doppler" "angle_doppler")# The Doppler dimension requires a specific processing
<block_start>x2_pad=F.pad(x2 (0 1 0 0) "constant" 0)<line_sep>x2_down=self.doppler_max_pool(x2_pad)<block_end><else_stmt><block_start>x2_down=self.max_pool(x2)<block_end>x3=self.single_conv_block1_1x1(x2_down)<line_sep># return input of ASPP block + latent features
<return>x2_down x3<block_end><block_end><class_stmt>TMVANet_Encoder(nn.Module)<block_start>"""
Temporal Multi-View with ASPP Network (TMVA-Net)
PARAMETERS
----------
n_classes: int
Number of classes used for the semantic segmentation task
n_frames: int
Total number of frames used as a sequence
"""<def_stmt>__init__ self n_classes n_frames<block_start>super().__init__()<line_sep>self.n_classes=n_classes<line_sep>self.n_frames=n_frames<line_sep># Backbone (encoding)
self.rd_encoding_branch=EncodingBranch("range_doppler")<line_sep>self.ra_encoding_branch=EncodingBranch("range_angle")<line_sep>self.ad_encoding_branch=EncodingBranch("angle_doppler")<line_sep># ASPP Blocks
self.rd_aspp_block=ASPPBlock(in_ch=128 out_ch=128)<line_sep>self.ra_aspp_block=ASPPBlock(in_ch=128 out_ch=128)<line_sep>self.ad_aspp_block=ASPPBlock(in_ch=128 out_ch=128)<line_sep>self.rd_single_conv_block1_1x1=ConvBlock(in_ch=640 out_ch=128 k_size=1 pad=0 dil=1)<line_sep>self.ra_single_conv_block1_1x1=ConvBlock(in_ch=640 out_ch=128 k_size=1 pad=0 dil=1)<line_sep>self.ad_single_conv_block1_1x1=ConvBlock(in_ch=640 out_ch=128 k_size=1 pad=0 dil=1)<block_end><def_stmt>forward self x_rd x_ra x_ad printshape=<false># Backbone
<block_start>ra_features,ra_latent=self.ra_encoding_branch(x_ra)<line_sep>rd_features,rd_latent=self.rd_encoding_branch(x_rd)<line_sep>ad_features,ad_latent=self.ad_encoding_branch(x_ad)<line_sep># ASPP blocks
x1_rd=self.rd_aspp_block(rd_features)<line_sep>x1_ra=self.ra_aspp_block(ra_features)<line_sep>x1_ad=self.ad_aspp_block(ad_features)<line_sep>x2_rd=self.rd_single_conv_block1_1x1(x1_rd)<line_sep>x2_ra=self.ra_single_conv_block1_1x1(x1_ra)<line_sep>x2_ad=self.ad_single_conv_block1_1x1(x1_ad)<line_sep># Features join either the RD or the RA branch
x3=torch.cat((rd_latent ra_latent ad_latent) 1)<line_sep><return>x3 x2_rd x2_ad x2_ra<block_end><block_end><class_stmt>TMVANet_Decoder(nn.Module)<block_start>"""
Temporal Multi-View with ASPP Network (TMVA-Net)
PARAMETERS
----------
n_classes: int
Number of classes used for the semantic segmentation task
n_frames: int
Total number of frames used as a sequence
"""<def_stmt>__init__ self n_classes n_frames<block_start>super().__init__()<line_sep>self.n_classes=n_classes<line_sep>self.n_frames=n_frames<line_sep># Decoding
self.rd_single_conv_block2_1x1=ConvBlock(in_ch=384 out_ch=128 k_size=1 pad=0 dil=1)<line_sep>self.ra_single_conv_block2_1x1=ConvBlock(in_ch=384 out_ch=128 k_size=1 pad=0 dil=1)<line_sep># Parallel range-Doppler (RD) and range-angle (RA) decoding branches
self.rd_upconv1=nn.ConvTranspose2d(384 128 (2 1) stride=(2 1))<line_sep>self.ra_upconv1=nn.ConvTranspose2d(384 128 2 stride=2)<line_sep>self.rd_double_conv_block1=DoubleConvBlock(in_ch=128 out_ch=128 k_size=3 pad=1 dil=1)<line_sep>self.ra_double_conv_block1=DoubleConvBlock(in_ch=128 out_ch=128 k_size=3 pad=1 dil=1)<line_sep>self.rd_upconv2=nn.ConvTranspose2d(128 128 (2 1) stride=(2 1))<line_sep>self.ra_upconv2=nn.ConvTranspose2d(128 128 2 stride=2)<line_sep>self.rd_double_conv_block2=DoubleConvBlock(in_ch=128 out_ch=128 k_size=3 pad=1 dil=1)<line_sep>self.ra_double_conv_block2=DoubleConvBlock(in_ch=128 out_ch=128 k_size=3 pad=1 dil=1)<line_sep># Final 1D convs
self.rd_final=nn.Conv2d(in_channels=128 out_channels=n_classes kernel_size=1)<line_sep>self.ra_final=nn.Conv2d(in_channels=128 out_channels=n_classes kernel_size=1)<block_end><def_stmt>forward self x3 x2_rd x2_ad x2_ra# Parallel decoding branches with upconvs
# Latent Space
<block_start>x3_rd=self.rd_single_conv_block2_1x1(x3)<line_sep>x3_ra=self.ra_single_conv_block2_1x1(x3)<line_sep># Latent Space + ASPP features
x4_rd=torch.cat((x2_rd x3_rd x2_ad) 1)<line_sep>x4_ra=torch.cat((x2_ra x3_ra x2_ad) 1)<line_sep>x5_rd=self.rd_upconv1(x4_rd)<line_sep>x5_ra=self.ra_upconv1(x4_ra)<line_sep>x6_rd=self.rd_double_conv_block1(x5_rd)<line_sep>x6_ra=self.ra_double_conv_block1(x5_ra)<line_sep>x7_rd=self.rd_upconv2(x6_rd)<line_sep>x7_ra=self.ra_upconv2(x6_ra)<line_sep>x8_rd=self.rd_double_conv_block2(x7_rd)<line_sep>x8_ra=self.ra_double_conv_block2(x7_ra)<line_sep># Final 1D convolutions
x9_rd=self.rd_final(x8_rd)<line_sep>x9_ra=self.ra_final(x8_ra)<line_sep><return>x9_rd x9_ra<block_end><block_end><class_stmt>TMVANet(nn.Module)<block_start>"""
Temporal Multi-View with ASPP Network (TMVA-Net)
PARAMETERS
----------
n_classes: int
Number of classes used for the semantic segmentation task
n_frames: int
Total number of frames used as a sequence
"""<def_stmt>__init__ self n_classes n_frames<block_start>super().__init__()<line_sep>self.n_classes=n_classes<line_sep>self.n_frames=n_frames<line_sep>self.encoder=TMVANet_Encoder(n_classes n_frames)<line_sep>self.decoder=TMVANet_Decoder(n_classes n_frames)<block_end><def_stmt>forward self x_rd x_ra x_ad<block_start>x3,x2_rd,x2_ad,x2_ra=self.encoder(x_rd x_ra x_ad)<line_sep>x9_rd,x9_ra=self.decoder(x3 x2_rd x2_ad x2_ra)<line_sep><return>x9_rd x9_ra<block_end><block_end>
|
# This sample tests that type aliases can consist of
# partially-specialized classes that can be further
# specialized.
# pyright: strict
<import_from_stmt>typing Callable Generic Literal Tuple Optional TypeVar<import_from_stmt>typing_extensions ParamSpec<line_sep>T=TypeVar("T")<line_sep>P=ParamSpec("P")<line_sep>ValidationResult=Tuple[bool Optional[T]]<def_stmt>foo <arrow>ValidationResult[str]<block_start><return><false> "valid"<block_end><class_stmt>ClassA(Generic[T])<block_start><def_stmt>__new__ cls value:T<arrow>"ClassA[T]"<block_start><ellipsis><block_end><block_end>TypeAliasA=ClassA[T]<line_sep>a1=ClassA(3.0)<line_sep>t_a1:Literal["ClassA[float]"]=reveal_type(a1)<line_sep>a2=TypeAliasA(3.0)<line_sep>t_a2:Literal["ClassA[float]"]=reveal_type(a2)<line_sep>Func=Callable[P T]<line_sep>AnyFunc=Func[P int]<line_sep>AnyFunc[P]<line_sep>
|
<import_from_stmt>.basetrainer BaseTrainer# noqa
<import_from_stmt>.trainer_vqvae VQVAETrainer# noqa
<import_from_stmt>.trainer_lsgan LSGANTrainer# noqa
<import_from_stmt>.trainer_cyclegan CycleGANTrainer# noqa
<import_from_stmt>.trainer_stargan StarGANTrainer# noqa
<import_from_stmt>.basetrainer TrainerWrapper# noqa
|
a=[1 2 3 4 5 ]<line_sep>print(*a)<for_stmt>i a<block_start>print(i end=' ')<block_end>
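# Both forms print the elements space-separated: 1 2 3 4 5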
|
<import_stmt>tensorflow<as>tf sys<line_sep># You will be sending the image to be classified as a parameter
provided_image_path=sys.argv[1]<line_sep># then we will read the image data
provided_image_data=tf.gfile.FastGFile(provided_image_path 'rb').read()<line_sep># Loads label file
label_lines=[line.rstrip()<for>line tf.gfile.GFile("tensorflow_files/retrained_labels.txt")]<line_sep># Unpersists graph from file
<with_stmt>tf.gfile.FastGFile("tensorflow_files/retrained_graph.pb" 'rb')<as>f<block_start>graph_def=tf.GraphDef()<line_sep>graph_def.ParseFromString(f.read())<line_sep>_=tf.import_graph_def(graph_def name='')<block_end><with_stmt>tf.Session()<as>sess# pass the provided_image_data as input to the graph
<block_start>softmax_tensor=sess.graph.get_tensor_by_name('final_result:0')<line_sep>network_predictions=sess.run(softmax_tensor {'DecodeJpeg/contents:0':provided_image_data})<line_sep># Sort the result by confidence to show the flower labels accordingly
top_predictions=network_predictions[0].argsort()[-len(network_predictions[0]):][::-1]<for_stmt>prediction top_predictions<block_start>flower_type=label_lines[prediction]<line_sep>score=network_predictions[0][prediction]<line_sep>print('%s (score = %.5f)'%(flower_type score))<block_end><block_end>
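# Labels are printed from highest to lowest confidence score.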
|
<def_stmt>test_func <block_start>""" My cool test.name """<assert_stmt><true><block_end>
|
<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.optim<as>optim<import_from_stmt>torch.optim lr_scheduler<import_from_stmt>apex amp<import_from_stmt>data_loader create_dataloaders<import_from_stmt>model get_trainable_params create_model print_model_params<import_from_stmt>train train<import_from_stmt>utils parse_and_override_params<import_stmt>foundations<line_sep># Fix random seed
torch.manual_seed(0)<line_sep>np.random.seed(0)<line_sep>torch.backends.cudnn.deterministic=<true><line_sep>torch.backends.cudnn.benchmark=<false><line_sep>params=foundations.load_parameters()<line_sep>data_dict=parse_and_override_params(params)<line_sep># Set job tags to easily spot data in use
foundations.set_tag(f'{data_dict[params["train_data"]]}: {params["train_data"]}')<line_sep># foundations.set_tag(f'big {params["train_data"]}')
print('Creating datasets')<line_sep># Get dataloaders
train_dl,val_base_dl,val_augment_dl,display_dl_iter=create_dataloaders(params)<line_sep>print('Creating loss function')<line_sep># Loss function
criterion=nn.CrossEntropyLoss()<line_sep>print('Creating model')<line_sep># Create model, freeze layers and change last layer
model=create_model(bool(params['use_hidden_layer']) params['dropout'])<line_sep>_=print_model_params(model)<line_sep>params_to_update=get_trainable_params(model)<line_sep>print('Creating optimizer')<line_sep># Create optimizer and learning rate schedules
optimizer=optim.Adam(params_to_update lr=params['max_lr'] weight_decay=params['weight_decay'])<line_sep>model,optimizer=amp.initialize(model optimizer opt_level='O1' verbosity=0)<line_sep># Learning rate scheme
<if_stmt>bool(params['use_lr_scheduler'])<block_start>step_size_up=int(params['n_epochs']<times>len(train_dl)<times>0.3)<line_sep>step_size_down=params['n_epochs']<times>len(train_dl)-step_size_up<line_sep>scheduler=lr_scheduler.OneCycleLR(optimizer params['max_lr'] total_steps=<none> epochs=params['n_epochs'] steps_per_epoch=len(train_dl) pct_start=params['pct_start'] anneal_strategy='cos' cycle_momentum=<false>)<block_end><else_stmt><block_start>scheduler=<none><block_end>print('Training start..')<line_sep># Train
train(train_dl val_base_dl val_augment_dl display_dl_iter model optimizer params['n_epochs'] params['max_lr'] scheduler criterion train_source=params["train_data"])<line_sep>
|
EXAMPLE=<true><line_sep>MYSQL_HOST="development.com"<line_sep>VERSION=1<line_sep>AGE=15<line_sep>NAME="MIKE"<line_sep>IMAGE_1="aaa"<line_sep>IMAGE_2="bbb"<line_sep>IMAGE_4="a"<line_sep>IMAGE_5="b"<line_sep>
|
<import_from_stmt>tito.builder Builder<class_stmt>AtomicReactorBuilder(Builder)<block_start><def_stmt>__init__ self **kwargs<block_start>super(AtomicReactorBuilder self).__init__(**kwargs)<line_sep># tarball has to represent Source0
# but internal structure should remain same
# i.e. {name}-{version} otherwise %setup -q
# will fail
self.tgz_filename=self.display_version+".tar.gz"<block_end><block_end>
|
<import_from_future_stmt> print_function<import_stmt>tensorflow<as>tf<import_from_stmt>functools partial<import_stmt>os<import_from_stmt>os listdir<import_from_stmt>os.path isfile join<import_stmt>shutil<import_stmt>sys<import_from_stmt>glob glob<import_stmt>math<import_stmt>json<import_stmt>logging<import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_from_stmt>datetime datetime<import_from_stmt>tensorflow.core.framework summary_pb2<def_stmt>make_summary name val<block_start><return>summary_pb2.Summary(value=[summary_pb2.Summary.Value(tag=name simple_value=val)])<block_end><def_stmt>summary_stats name tensor collections=<none> hist=<false><block_start>collections=collections<or>[tf.GraphKeys.SUMMARIES]<line_sep>ave=tf.reduce_mean(tensor)<line_sep>std=tf.sqrt(tf.reduce_mean(tf.square(ave-tensor)))<line_sep>tf.summary.scalar(name+'_ave' ave collections)<line_sep>tf.summary.scalar(name+'_std' std collections)<if_stmt>hist<block_start>tf.summary.histogram(name+'_hist' tensor collections)<block_end><block_end><def_stmt>prepare_dirs_and_logger config<block_start><if_stmt>config.load_path<block_start>strip_lp=config.load_path.strip('./')<if_stmt>strip_lp.startswith(config.log_dir)<block_start>config.model_dir=config.load_path<block_end><else_stmt><block_start><if_stmt>config.load_path.startswith(config.dataset)<block_start>config.model_name=config.load_path<block_end><else_stmt><block_start>config.model_name="{}_{}".format(config.dataset config.load_path)<block_end><block_end><block_end><else_stmt>#new model
<block_start>config.model_name="{}_{}".format(config.dataset get_time())<if_stmt>config.descrip<block_start>config.model_name<augadd>'_'+config.descrip<block_end><block_end><if_stmt><not>hasattr(config 'model_dir')<block_start>config.model_dir=os.path.join(config.log_dir config.model_name)<block_end>config.data_path=os.path.join(config.data_dir config.dataset)<if_stmt><not>config.load_path<block_start>config.log_code_dir=os.path.join(config.model_dir 'code')<for_stmt>path [config.log_dir config.data_dir config.model_dir]<block_start><if_stmt><not>os.path.exists(path)<block_start>os.makedirs(path)<block_end><block_end>#Copy python code in directory into model_dir/code for future reference:
#All python files in this directory are copied.
code_dir=os.path.dirname(os.path.realpath(sys.argv[0]))<line_sep>##additionally, all python files in these directories are also copied. Also symlinks are copied. The idea is to allow easier model loading in the future
allowed_dirs=['causal_controller' 'causal_began' 'causal_dcgan' 'figure_scripts']<line_sep>#ignore copy of all non-*.py except for these directories
#If you make another folder you want copied, you have to add it here
ignore_these=partial(ignore_except allowed_dirs=allowed_dirs)<line_sep>shutil.copytree(code_dir config.log_code_dir symlinks=<true> ignore=ignore_these)<block_end><block_end># model_files = [f for f in listdir(code_dir) if isfile(join(code_dir, f))]
# for f in model_files:
# if f.endswith('.py'):
# shutil.copy2(f,config.log_code_dir)
<def_stmt>ignore_except src contents allowed_dirs<block_start>files=filter(os.path.isfile contents)<line_sep>dirs=filter(os.path.isdir contents)<line_sep>ignored_files=[f<for>f files<if><not>f.endswith('.py')]<line_sep>ignored_dirs=[d<for>d dirs<if><not>d<in>allowed_dirs]<line_sep><return>ignored_files+ignored_dirs<block_end><def_stmt>get_time <block_start><return>datetime.now().strftime("%m%d_%H%M%S")<block_end><def_stmt>save_configs config cc_config dcgan_config began_config<block_start>model_dir=config.model_dir<line_sep>print("[*] MODEL dir: %s"%model_dir)<line_sep>save_config(config)<line_sep>save_config(cc_config 'cc_params.json' model_dir)<line_sep>save_config(dcgan_config 'dcgan_params.json' model_dir)<line_sep>save_config(began_config 'began_params.json' model_dir)<block_end><def_stmt>save_config config name="params.json" where=<none><block_start>where=where<or>config.model_dir<line_sep>param_path=os.path.join(where name)<line_sep>print("[*] PARAM path: %s"%param_path)<with_stmt>open(param_path 'w')<as>fp<block_start>json.dump(config.__dict__ fp indent=4 sort_keys=<true>)<block_end><block_end><def_stmt>get_available_gpus <block_start><import_from_stmt>tensorflow.python.client device_lib<line_sep>local_device_protos=device_lib.list_local_devices()<line_sep><return>[x.name<for>x local_device_protos<if>x.device_type<eq>'GPU']<block_end><def_stmt>distribute_input_data data_loader num_gpu<block_start>'''
data_loader is a dictionary of tensors that are fed into our model
This function takes that dictionary of tensors whose first dimension is n*batch_size
and breaks it up into n dictionaries with the same keys, each holding tensors whose
first dimension is batch_size. One dictionary is given to each gpu
'''<if_stmt>num_gpu<eq>0<block_start><return>{'/cpu:0':data_loader}<block_end>gpus=get_available_gpus()<if_stmt>num_gpu<g>len(gpus)<block_start><raise>ValueError('number of gpus specified={}, more than gpus available={}'.format(num_gpu len(gpus)))<block_end>gpus=gpus[:num_gpu]<line_sep>data_by_gpu={g:{}<for>g gpus}<for_stmt>key,value data_loader.items()<block_start>spl_vals=tf.split(value num_gpu)<for_stmt>gpu,val zip(gpus spl_vals)<block_start>data_by_gpu[gpu][key]=val<block_end><block_end><return>data_by_gpu<block_end><def_stmt>rank array<block_start><return>len(array.shape)<block_end><def_stmt>make_grid tensor nrow=8 padding=2 normalize=<false> scale_each=<false><block_start>"""Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py
minor improvement, row/col was reversed"""<line_sep>nmaps=tensor.shape[0]<line_sep>ymaps=min(nrow nmaps)<line_sep>xmaps=int(math.ceil(float(nmaps)/ymaps))<line_sep>height,width=int(tensor.shape[1]+padding) int(tensor.shape[2]+padding)<line_sep>grid=np.zeros([height<times>ymaps+1+padding<floordiv>2 width<times>xmaps+1+padding<floordiv>2 3] dtype=np.uint8)<line_sep>k=0<for_stmt>y range(ymaps)<block_start><for_stmt>x range(xmaps)<block_start><if_stmt>k<ge>nmaps<block_start><break><block_end>h,h_width=y<times>height+1+padding<floordiv>2 height-padding<line_sep>w,w_width=x<times>width+1+padding<floordiv>2 width-padding<line_sep>grid[h:h+h_width w:w+w_width]=tensor[k]<line_sep>k=k+1<block_end><block_end><return>grid<block_end><def_stmt>save_image tensor filename nrow=8 padding=2 normalize=<false> scale_each=<false><block_start>ndarr=make_grid(tensor nrow=nrow padding=padding normalize=normalize scale_each=scale_each)<line_sep>im=Image.fromarray(ndarr)<line_sep>im.save(filename)<block_end>
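# make_grid tiles a batch of uint8 images into a single rows-by-cols grid with padding pixels between tiles; save_image writes that grid to disk via PIL.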
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
@File : opt_clean.py
@Author : wangqiuliang
@Date : 2019-03-18
@Desc : parse python function for the unit test of erase class
"""<import_from_stmt>dataclasses dataclass<line_sep># Test_Erase_class
@dataclass<class_stmt>Point<block_start>x:float<line_sep>y:float<def_stmt>product self<block_start><return>self.x<times>self.y<block_end><block_end><def_stmt>test_erase_class_fn p_in<block_start>p=Point(p_in)<line_sep><return>p.x<times>p.y<block_end>
|
<import_from_stmt>robot.libraries.BuiltIn BuiltIn<def_stmt>invalidate_driver <block_start>sl=BuiltIn().get_library_instance("SeleniumLibrary")<line_sep>sl.register_driver(<none> "tidii")<line_sep>sl.register_driver(<none> "foobar")<block_end>
|
<import_from_stmt>nmmo.entity.entity Entity<import_from_stmt>nmmo.entity.player Player<line_sep>
|
<def_stmt>binary_search arr item<block_start>low=0<line_sep>high=len(arr)-1<line_sep>result=-1<while_stmt>(low<le>high)<block_start>mid=(low+high)<floordiv>2<if_stmt>item<eq>arr[mid]<block_start>result=mid<line_sep>high=mid-1<block_end><elif_stmt>(item<l>arr[mid])<block_start>high=mid-1<block_end><else_stmt><block_start>low=mid+1<block_end><block_end><return>result<block_end>
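# Returns the index of the first (leftmost) occurrence of item, or -1 if absent, e.g. binary_search([1, 2, 2, 3], 2) == 1.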
|
<import_stmt>numpy<as>np<import_from_stmt>modAL.utils.combination make_linear_combination make_product<import_from_stmt>modAL.utils.selection multi_argmax<import_from_stmt>modAL.uncertainty classifier_uncertainty classifier_margin<import_from_stmt>modAL.models ActiveLearner<import_from_stmt>sklearn.datasets make_blobs<import_from_stmt>sklearn.gaussian_process GaussianProcessClassifier<import_from_stmt>sklearn.gaussian_process.kernels RBF<line_sep># generating the data
centers=np.asarray([[-2 3] [0.5 5] [1 1.5]])<line_sep>X,y=make_blobs(n_features=2 n_samples=1000 random_state=0 cluster_std=0.7 centers=centers)<line_sep># initial training data
initial_idx=np.random.choice(range(len(X)) size=20)<line_sep>X_training,y_training=X[initial_idx] y[initial_idx]<line_sep># initializing the learner
learner=ActiveLearner(estimator=GaussianProcessClassifier(1.0<times>RBF(1.0)) X_training=X_training y_training=y_training)<line_sep># creating new utility measures by linear combination and product
# linear_combination will return 1.0*classifier_uncertainty + 1.0*classifier_margin
linear_combination=make_linear_combination(classifier_uncertainty classifier_margin weights=[1.0 1.0])<line_sep># product will return (classifier_uncertainty**0.5)*(classifier_margin**0.1)
product=make_product(classifier_uncertainty classifier_margin exponents=[0.5 0.1])<line_sep># defining the custom query strategy, which uses the linear combination of
# classifier uncertainty and classifier margin
<def_stmt>custom_query_strategy classifier X n_instances=1<block_start>utility=linear_combination(classifier X)<line_sep><return>multi_argmax(utility n_instances=n_instances)<block_end>custom_query_learner=ActiveLearner(estimator=GaussianProcessClassifier(1.0<times>RBF(1.0)) query_strategy=custom_query_strategy X_training=X_training y_training=y_training)<line_sep># pool-based sampling
n_queries=20<for_stmt>idx range(n_queries)<block_start>query_idx,query_instance=custom_query_learner.query(X n_instances=2)<line_sep>custom_query_learner.teach(X=X[query_idx].reshape(-1 2) y=y[query_idx].reshape(-1 ))<block_end>
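# multi_argmax picks the indices of the n_instances highest utility values, so each query selects the two most informative samples here.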
|
<import_stmt>clr<line_sep>clr.AddReference('RevitAPI')<import_from_stmt>Autodesk.Revit.DB *<line_sep>items=UnwrapElement(IN[0])<line_sep>booleans=list()<for_stmt>item items<block_start><try_stmt><block_start><if_stmt>item.CurtainGrids<block_start>booleans.append(<true>)<block_end><else_stmt><block_start>booleans.append(<false>)<block_end><block_end><except_stmt><block_start>booleans.append(<false>)<block_end><block_end>OUT=booleans<line_sep>
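# Elements whose CurtainGrids lookup raises or returns nothing are reported as False; only elements with curtain grids yield True.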
|
# -*- coding: utf-8 -*-
'''
Tencent is pleased to support the open source community by making FAutoTest available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
'''<import_from_stmt>fastAutoTest.core.wx.wxUserAPI ActionType<import_from_stmt>fastAutoTest.core.wx.wxUserAPI ByType<class_stmt>WxCommandManager(object)# $$ serves as an escape when the templates are formatted
<block_start>_elementMap={ByType.ID:"$$('#$id')[0]" ByType.NAME:"$$('.$name')[$index]" ByType.XPATH:"var xpath ='$xpath';"<concat>"xpath_obj = document.evaluate(xpath,document,null, XPathResult.ANY_TYPE, null);"<concat>"var button = xpath_obj.iterateNext()"}<line_sep># parameters executed in doCommandWithElement
_jsActionMap={ActionType.GET_ELEMENT_RECT:";left=Math.round(button.getBoundingClientRect().left);"<concat>"right=Math.round(button.getBoundingClientRect().right);"<concat>"bottom=Math.round(button.getBoundingClientRect().bottom);"<concat>"topp=Math.round(button.getBoundingClientRect().top);"<concat>"x=Math.round((left+right)/2);"<concat>"y=Math.round((topp+bottom)/2);" ActionType.IS_ELEMENT_EXIST:";button" ActionType.GET_ELEMENT_TEXT:";button.textContent;" ActionType.GET_ELEMENT_SRC:";button.getAttribute('src')" }<line_sep>_methodMap={ActionType.GET_DOCUMENT:"DOM.getDocument" ActionType.GET_HTML:"DOM.getOuterHTML" ActionType.SCROLL:"Input.synthesizeScrollGesture" ActionType.CLICK:"Input.synthesizeTapGesture" ActionType.GET_ELEMENT_RECT:"Runtime.evaluate" ActionType.GET_PICKER_RECT:"Runtime.evaluate" ActionType.GET_ELEMENT_TEXT:"Runtime.evaluate" ActionType.GET_ELEMENT_SRC:"Runtime.evaluate" ActionType.GET_PAGE_HEIGHT:"Runtime.evaluate" ActionType.GET_JS_VALUE:"Runtime.evaluate" ActionType.TEXT:"Input.dispatchKeyEvent" ActionType.IS_ELEMENT_EXIST:"Runtime.evaluate" ActionType.GET_WINDOW_HEIGHT:"Runtime.evaluate" ActionType.GET_WINDOW_WIDTH:"Runtime.evaluate"}<line_sep># string.Template
# templates for the final concatenation in jsonConcat
_paramsMap={"Runtime.evaluate":'{"expression": "$expression"}' "Input.synthesizeScrollGesture":'{"type": "mouseWheel", "x": $x, "y": $y,"xDistance": $xDistance, "yDistance": $yDistance,"speed":$speed}' "Page.navigate":'{"url":"$url"}' "Input.dispatchKeyEvent":'{"type":"$type","text":"$text","unmodifiedText":"$text"}' "Input.synthesizeTapGesture":'{"x":$x,"y":$y}' "DOM.getDocument":"{''}" "DOM.getOuterHTML":'{"nodeId": $nodeId}' }<line_sep># doCommandWithoutElement 中执行的参数
_expressionMap={ActionType.GET_PAGE_HEIGHT:'document.body.scrollHeight' ActionType.GET_JS_VALUE:'$value' ActionType.GET_WINDOW_HEIGHT:'document.documentElement.clientHeight' ActionType.GET_WINDOW_WIDTH:"document.documentElement.clientWidth"}<def_stmt>getElement self actionType default=<none><block_start><return>self._elementMap.get(actionType default)<block_end><def_stmt>getJsAction self actionType default=<none><block_start><return>self._jsActionMap.get(actionType default)<block_end><def_stmt>getMethod self actionType default=<none><block_start><return>self._methodMap.get(actionType default)<block_end><def_stmt>getParams self actionType default=<none><block_start><return>self._paramsMap.get(actionType default)<block_end><def_stmt>getExpression self actionType default=<none><block_start><return>self._expressionMap.get(actionType default)<block_end><block_end>
|
<import_stmt>logging<import_stmt>random<import_stmt>string<import_stmt>requests<import_stmt>json<import_from_stmt>constance config<import_from_stmt>rest_framework status<import_from_stmt>rest_framework.authentication SessionAuthentication<import_from_stmt>rest_framework.permissions IsAuthenticated<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework.views APIView<import_from_stmt>seahub.api2.authentication TokenAuthentication<import_from_stmt>seahub.api2.throttling UserRateThrottle<import_from_stmt>seahub.api2.utils api_error<import_from_stmt>seaserv seafile_api<import_from_stmt>seahub.utils.repo get_available_repo_perms get_repo_owner<import_from_stmt>seahub.base.templatetags.seahub_tags email2nickname<import_from_stmt>seahub.constants PERMISSION_READ PERMISSION_READ_WRITE<import_from_stmt>seahub.ocm.models OCMShareReceived OCMShare<import_from_stmt>seahub.ocm.settings ENABLE_OCM SUPPORTED_OCM_PROTOCOLS OCM_SEAFILE_PROTOCOL OCM_RESOURCE_TYPE_LIBRARY OCM_API_VERSION OCM_SHARE_TYPES OCM_ENDPOINT OCM_PROVIDER_ID OCM_NOTIFICATION_TYPE_LIST OCM_NOTIFICATION_SHARE_UNSHARED OCM_NOTIFICATION_SHARE_DECLINED OCM_PROTOCOL_URL OCM_NOTIFICATION_URL OCM_CREATE_SHARE_URL OCM_REMOTE_SERVERS<line_sep>logger=logging.getLogger(__name__)<line_sep># Convert seafile permission to ocm protocol standard permission
SEAFILE_PERMISSION2OCM_PERMISSION={PERMISSION_READ:['read'] PERMISSION_READ_WRITE:['read' 'write'] }<def_stmt>get_server_name_by_url url<block_start><for_stmt>name_domain_dict OCM_REMOTE_SERVERS<block_start><if_stmt>name_domain_dict['server_url']<eq>url<block_start><return>name_domain_dict['server_name']<block_end><block_end><block_end><def_stmt>gen_shared_secret length=23<block_start><return>''.join(random.choice(string.ascii_lowercase+string.digits)<for>i range(length))<block_end><def_stmt>get_remote_protocol url<block_start>response=requests.get(url)<line_sep><return>json.loads(response.text)<block_end><def_stmt>is_valid_url url<block_start><if_stmt><not>url.startswith('https://')<and><not>url.startswith('http://')<block_start><return><false><block_end><if_stmt><not>url.endswith('/')<block_start><return><false><block_end><return><true><block_end><def_stmt>check_url_slash url<block_start><if_stmt><not>url.endswith('/')<block_start>url<augadd>'/'<block_end><return>url<block_end><class_stmt>OCMProtocolView(APIView)<block_start>throttle_classes=(UserRateThrottle )<def_stmt>get self request<block_start>"""
return ocm protocol info to remote server
"""<line_sep># TODO
# currently if ENABLE_OCM is False, return 404 as if ocm protocol is not implemented
# ocm protocol is not clear about this, https://github.com/GEANT/OCM-API/pull/37
<if_stmt><not>ENABLE_OCM<block_start>error_msg='feature not enabled.'<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end>result={'enabled':<true> 'apiVersion':OCM_API_VERSION 'endPoint':config.SERVICE_URL+'/'+OCM_ENDPOINT 'resourceTypes':{'name':OCM_RESOURCE_TYPE_LIBRARY 'shareTypes':OCM_SHARE_TYPES 'protocols':{OCM_SEAFILE_PROTOCOL:OCM_SEAFILE_PROTOCOL }}}<line_sep><return>Response(result)<block_end><block_end><class_stmt>OCMSharesView(APIView)<block_start>throttle_classes=(UserRateThrottle )<def_stmt>post self request<block_start>"""
create ocm share on consumer server
"""<line_sep># argument check
share_with=request.data.get('shareWith' '')<if_stmt><not>share_with<block_start>error_msg='shareWith invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end># currently only supports repo share
repo_name=request.data.get('name' '')<if_stmt><not>repo_name<block_start>error_msg='name invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>sender=request.data.get('sender' '')<if_stmt><not>sender<block_start>error_msg='sender invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>share_type=request.data.get('shareType' '')<if_stmt>share_type<not><in>OCM_SHARE_TYPES<block_start>error_msg='shareType %s invalid.'%share_type<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>resource_type=request.data.get('resourceType' '')<if_stmt>resource_type<ne>OCM_RESOURCE_TYPE_LIBRARY<block_start>error_msg='resourceType %s invalid.'%resource_type<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>provider_id=request.data.get('providerId' '')<if_stmt><not>provider_id<block_start>error_msg='providerId invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>"""
other ocm protocol fields currently not used
description = request.data.get('description', '')
owner = request.data.get('owner', '')
ownerDisplayName = request.data.get('ownerDisplayName', '')
senderDisplayName = request.data.get('senderDisplayName', '')
"""<line_sep>protocol=request.data.get('protocol' '')<if_stmt><not>protocol<block_start>error_msg='protocol invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><if_stmt>'name'<not><in>protocol.keys()<block_start>error_msg='protocol.name invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><if_stmt>protocol['name']<not><in>SUPPORTED_OCM_PROTOCOLS<block_start>error_msg='protocol %s not support.'%protocol['name']<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><if_stmt>'options'<not><in>protocol.keys()<block_start>error_msg='protocol.options invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><if_stmt>'sharedSecret'<not><in>protocol['options'].keys()<block_start>error_msg='protocol.options.sharedSecret invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><if_stmt>'permissions'<not><in>protocol['options'].keys()<block_start>error_msg='protocol.options.permissions invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><if_stmt>protocol['name']<eq>OCM_SEAFILE_PROTOCOL<block_start><if_stmt>'repoId'<not><in>protocol['options'].keys()<block_start>error_msg='protocol.options.repoId invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><if_stmt>'seafileServiceURL'<not><in>protocol['options'].keys()<block_start>error_msg='protocol.options.seafileServiceURL invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><block_end><if_stmt>protocol['name']<eq>OCM_SEAFILE_PROTOCOL<block_start>shared_secret=protocol['options']['sharedSecret']<line_sep>permissions=protocol['options']['permissions']<line_sep>repo_id=protocol['options']['repoId']<line_sep>from_server_url=protocol['options']['seafileServiceURL']<block_end><if_stmt>OCMShareReceived.objects.filter(from_user=sender to_user=share_with from_server_url=from_server_url repo_id=repo_id repo_name=repo_name provider_id=provider_id ).exists()<block_start><return>api_error(status.HTTP_400_BAD_REQUEST 'same share already exists.')<block_end><if_stmt>'write'<in>permissions<block_start>permission=PERMISSION_READ_WRITE<block_end><else_stmt><block_start>permission=PERMISSION_READ<block_end>OCMShareReceived.objects.add(shared_secret=shared_secret from_user=sender to_user=share_with from_server_url=from_server_url repo_id=repo_id repo_name=repo_name permission=permission provider_id=provider_id )<line_sep><return>Response(request.data status=status.HTTP_201_CREATED)<block_end><block_end><class_stmt>OCMNotificationsView(APIView)<block_start>throttle_classes=(UserRateThrottle )<def_stmt>post self request<block_start>""" Handle notifications from remote server
"""<line_sep>notification_type=request.data.get('notificationType' '')<if_stmt><not>notification_type<block_start>error_msg='notificationType %s invalid.'%notification_type<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><if_stmt>notification_type<not><in>OCM_NOTIFICATION_TYPE_LIST<block_start>error_msg='notificationType %s not supportd.'%notification_type<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>resource_type=request.data.get('resourceType' '')<if_stmt>resource_type<ne>OCM_RESOURCE_TYPE_LIBRARY<block_start>error_msg='resourceType %s invalid.'%resource_type<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>notification=request.data.get('notification' '')<if_stmt><not>notification<block_start>error_msg='notification invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>shared_secret=notification.get('sharedSecret' '')<if_stmt><not>shared_secret<block_start>error_msg='sharedSecret invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><if_stmt>notification_type<eq>OCM_NOTIFICATION_SHARE_UNSHARED<block_start>"""
Provider unshared, then delete ocm_share_received record on Consumer
"""<try_stmt><block_start>ocm_share_received=OCMShareReceived.objects.get(shared_secret=shared_secret)<block_end><except_stmt>OCMShareReceived.DoesNotExist<block_start><return>Response(request.data)<block_end><if_stmt>ocm_share_received<block_start><try_stmt><block_start>ocm_share_received.delete()<block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR 'Invernal Server Error')<block_end><block_end><block_end><elif_stmt>notification_type<eq>OCM_NOTIFICATION_SHARE_DECLINED<block_start>"""
Consumer declined share, then delete ocm_share record on Provider
"""<try_stmt><block_start>ocm_share=OCMShare.objects.get(shared_secret=shared_secret)<block_end><except_stmt>OCMShareReceived.DoesNotExist<block_start><return>Response(request.data)<block_end><if_stmt>ocm_share<block_start><try_stmt><block_start>ocm_share.delete()<block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR 'Invernal Server Error')<block_end><block_end><block_end><return>Response(request.data)<block_end><block_end><class_stmt>OCMSharesPrepareView(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>permission_classes=(IsAuthenticated )<line_sep>throttle_classes=(UserRateThrottle )<def_stmt>get self request<block_start>"""
list ocm shares of request user, filtered by repo_id
"""<line_sep>repo_id=request.GET.get('repo_id' '')<if_stmt>repo_id<block_start>ocm_shares=OCMShare.objects.filter(repo_id=repo_id from_user=request.user.username)<block_end><else_stmt><block_start>ocm_shares=OCMShare.objects.filter(from_user=request.user.username)<block_end>ocm_share_list=[]<for_stmt>ocm_share ocm_shares<block_start>ocm_info=ocm_share.to_dict()<line_sep>ocm_info['to_server_name']=get_server_name_by_url(ocm_share.to_server_url)<line_sep>ocm_share_list.append(ocm_info)<block_end><return>Response({'ocm_share_list':ocm_share_list})<block_end><def_stmt>post self request<block_start>"""
prepare provider server info for ocm, and send post request to consumer
three steps:
1. send get request to remote server, asking if it supports ocm, and get other info
2. send post request to remote server; the remote server creates a record in its
ocm_share_received table
3. store a record in the local ocm_share table
"""<line_sep># argument check
to_user=request.data.get('to_user' '')<if_stmt><not>to_user<block_start>error_msg='to_user invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>to_server_url=request.data.get('to_server_url' '').lower().strip()<if_stmt><not>to_server_url<or><not>is_valid_url(to_server_url)<block_start>error_msg='to_server_url %s invalid.'%to_server_url<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>repo_id=request.data.get('repo_id' '')<if_stmt><not>repo_id<block_start>error_msg='repo_id invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>repo=seafile_api.get_repo(repo_id)<if_stmt><not>repo<block_start><return>api_error(status.HTTP_404_NOT_FOUND 'Library %s not found.'%repo_id)<block_end>path=request.data.get('path' '/')<line_sep># TODO
# 1. folder check
# 2. encrypted repo check
#
# if seafile_api.get_dir_id_by_path(repo.id, path) is None:
# return api_error(status.HTTP_404_NOT_FOUND, 'Folder %s not found.' % path)
#
# if repo.encrypted and path != '/':
# return api_error(status.HTTP_400_BAD_REQUEST, 'Folder invalid.')
permission=request.data.get('permission' PERMISSION_READ)<if_stmt>permission<not><in>get_available_repo_perms()<block_start><return>api_error(status.HTTP_400_BAD_REQUEST 'permission invalid.')<block_end>username=request.user.username<line_sep>repo_owner=get_repo_owner(request repo_id)<if_stmt>repo_owner<ne>username<block_start><return>api_error(status.HTTP_403_FORBIDDEN 'Permission denied.')<block_end><if_stmt>OCMShare.objects.filter(from_user=request.user.username to_user=to_user to_server_url=to_server_url repo_id=repo_id repo_name=repo.repo_name path=path ).exists()<block_start><return>api_error(status.HTTP_400_BAD_REQUEST 'same share already exists.')<block_end>consumer_protocol=get_remote_protocol(to_server_url+OCM_PROTOCOL_URL)<line_sep>shared_secret=gen_shared_secret()<line_sep>from_user=username<line_sep>post_data={'shareWith':to_user 'name':repo.repo_name 'description':'' 'providerId':OCM_PROVIDER_ID 'owner':repo_owner 'sender':from_user 'ownerDisplayName':email2nickname(repo_owner) 'senderDisplayName':email2nickname(from_user) 'shareType':consumer_protocol['resourceTypes']['shareTypes'][0] # currently only support user type
'resourceType':consumer_protocol['resourceTypes']['name'] # currently only support repo
'protocol':{'name':OCM_SEAFILE_PROTOCOL 'options':{'sharedSecret':shared_secret 'permissions':SEAFILE_PERMISSION2OCM_PERMISSION[permission] 'repoId':repo_id 'seafileServiceURL':check_url_slash(config.SERVICE_URL) } } }<line_sep>url=consumer_protocol['endPoint']+OCM_CREATE_SHARE_URL<try_stmt><block_start>requests.post(url json=post_data)<block_end><except_stmt>Exception<as>e<block_start>logging.error(e)<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR 'Internal Server Error')<block_end>ocm_share=OCMShare.objects.add(shared_secret=shared_secret from_user=request.user.username to_user=to_user to_server_url=to_server_url repo_id=repo_id repo_name=repo.repo_name path=path permission=permission )<line_sep>ocm_info=ocm_share.to_dict()<line_sep>ocm_info['to_server_name']=get_server_name_by_url(ocm_share.to_server_url)<line_sep><return>Response(ocm_info)<block_end><block_end><class_stmt>OCMSharePrepareView(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>permission_classes=(IsAuthenticated )<line_sep>throttle_classes=(UserRateThrottle )<def_stmt>delete self request pk<block_start>"""
delete an ocm share record
"""<try_stmt><block_start>ocm_share=OCMShare.objects.get(pk=pk)<block_end><except_stmt>OCMShareReceived.DoesNotExist<block_start>error_msg='OCMShare %s not found.'%pk<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end><if_stmt>ocm_share.from_user<ne>request.user.username<block_start>error_msg='permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end>to_server_url=ocm_share.to_server_url<line_sep>shared_secret=ocm_share.shared_secret<line_sep>consumer_protocol=get_remote_protocol(to_server_url+OCM_PROTOCOL_URL)<line_sep># send unshare notification to consumer
post_data={'notificationType':OCM_NOTIFICATION_SHARE_UNSHARED 'resourceType':OCM_RESOURCE_TYPE_LIBRARY 'providerId':OCM_PROVIDER_ID 'notification':{'sharedSecret':shared_secret 'message':'' } }<line_sep>url=consumer_protocol['endPoint']+OCM_NOTIFICATION_URL<try_stmt><block_start>requests.post(url json=post_data)<block_end><except_stmt>Exception<as>e<block_start>logging.error(e)<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR 'Internal Server Error')<block_end><try_stmt><block_start>ocm_share.delete()<block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR 'Internal Server Error')<block_end><return>Response({'success':<true>})<block_end><block_end><class_stmt>OCMSharesReceivedView(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>permission_classes=(IsAuthenticated )<line_sep>throttle_classes=(UserRateThrottle )<def_stmt>get self request<block_start>"""
list ocm shares received
"""<line_sep>ocm_share_received_list=[]<line_sep>ocm_shares_received=OCMShareReceived.objects.filter(to_user=request.user.username)<for_stmt>ocm_share_received ocm_shares_received<block_start>ocm_share_received_list.append(ocm_share_received.to_dict())<block_end><return>Response({'ocm_share_received_list':ocm_share_received_list})<block_end><block_end><class_stmt>OCMShareReceivedView(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>permission_classes=(IsAuthenticated )<line_sep>throttle_classes=(UserRateThrottle )<def_stmt>delete self request pk<block_start>"""
delete a share received record
"""<try_stmt><block_start>ocm_share_received=OCMShareReceived.objects.get(pk=pk)<block_end><except_stmt>OCMShareReceived.DoesNotExist<block_start>error_msg='OCMShareReceived %s not found.'%pk<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end><if_stmt>ocm_share_received.to_user<ne>request.user.username<block_start>error_msg='permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end>from_server_url=ocm_share_received.from_server_url<line_sep>shared_secret=ocm_share_received.shared_secret<line_sep>provider_protocol=get_remote_protocol(from_server_url+OCM_PROTOCOL_URL)<line_sep># send unshare notification to consumer
post_data={'notificationType':OCM_NOTIFICATION_SHARE_DECLINED 'resourceType':OCM_RESOURCE_TYPE_LIBRARY 'providerId':OCM_PROVIDER_ID 'notification':{'sharedSecret':shared_secret 'message':'' } }<line_sep>url=provider_protocol['endPoint']+OCM_NOTIFICATION_URL<try_stmt><block_start>requests.post(url json=post_data)<block_end><except_stmt>Exception<as>e<block_start>logging.error(e)<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR 'Internal Server Error')<block_end><try_stmt><block_start>ocm_share_received.delete()<block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR 'Internal Server Error')<block_end><return>Response({'success':<true>})<block_end><block_end>
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
"""Factory method for easily getting imdbs by name."""<line_sep>__sets={}<import_stmt>datasets.tabletop_object<import_stmt>datasets.osd_object<import_stmt>datasets.ocid_object<import_stmt>numpy<as>np<line_sep># tabletop object dataset
<for_stmt>split ['train' 'test' 'all']<block_start>name='tabletop_object_{}'.format(split)<line_sep>print(name)<line_sep>__sets[name]=(<lambda>split=split:datasets.TableTopObject(split))<block_end># OSD object dataset
<for_stmt>split ['test']<block_start>name='osd_object_{}'.format(split)<line_sep>print(name)<line_sep>__sets[name]=(<lambda>split=split:datasets.OSDObject(split))<block_end># OCID object dataset
<for_stmt>split ['test']<block_start>name='ocid_object_{}'.format(split)<line_sep>print(name)<line_sep>__sets[name]=(<lambda>split=split:datasets.OCIDObject(split))<block_end><def_stmt>get_dataset name<block_start>"""Get an imdb (image database) by name."""<if_stmt>name<not><in>__sets<block_start><raise>KeyError('Unknown dataset: {}'.format(name))<block_end><return>__sets[name]()<block_end><def_stmt>list_datasets <block_start>"""List all registered imdbs."""<line_sep><return>__sets.keys()<block_end>
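# Example: get_dataset('tabletop_object_train') builds datasets.TableTopObject('train'); unknown names raise KeyError.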
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test for 1D GMM with K > 2 number of components"""<import_stmt>logging<import_stmt>unittest<line_sep># Comments after imports suggest alternative comment style (for original tutorial)
<import_stmt>beanmachine.ppl<as>bm<import_stmt>torch# from torch import manual_seed, tensor
<import_stmt>torch.distributions<as>dist# from torch.distributions import Bernoulli, Normal, Uniform
<import_from_stmt>beanmachine.ppl.inference.bmg_inference BMGInference<import_from_stmt>torch tensor<line_sep># This makes the results deterministic and reproducible.
logging.getLogger("beanmachine").setLevel(50)<line_sep>torch.manual_seed(42)<line_sep># Model
<class_stmt>GaussianMixtureModel(object)<block_start><def_stmt>__init__ self k<block_start>self.K=k<block_end>@bm.random_variable<def_stmt>alpha self k<block_start><return>dist.Dirichlet(5<times>torch.ones(k))<block_end>@bm.random_variable<def_stmt>mu self c<block_start><return>dist.Normal(0 10)<block_end>@bm.random_variable<def_stmt>sigma self c<block_start><return>dist.Gamma(1 10)<block_end>@bm.random_variable<def_stmt>component self i<block_start>alpha=self.alpha(self.K)<line_sep><return>dist.Categorical(alpha)<block_end>@bm.random_variable<def_stmt>y self i<block_start>c=self.component(i)<line_sep><return>dist.Normal(self.mu(c) self.sigma(c))<block_end><block_end># Creating sample data
n=6# num observations
k=4# true number of clusters
gmm=GaussianMixtureModel(k=k)<line_sep>ground_truth={**{gmm.alpha(k):torch.ones(k)<times>1.0/k } **{gmm.mu(i):tensor(i%2).float()<for>i range(k)} **{gmm.sigma(i):tensor(0.1)<for>i range(k)} **{gmm.component(i):tensor(i%k).float()<for>i range(n)} }<line_sep># [Visualization code in tutorial skipped]
# Inference parameters
num_samples=(1###00 Sample size should not affect (the ability to find) compilation issues.
)<line_sep>queries=([gmm.alpha(gmm.K)]+[gmm.component(j)<for>j range(n)]+[gmm.mu(i)<for>i range(k)]+[gmm.sigma(i)<for>i range(k)])<line_sep>observations={gmm.y(i):ground_truth[gmm.mu(ground_truth[gmm.component(i)].item())]<for>i range(n)}<class_stmt>tutorialGMMwith1DimensionsAnd4Components(unittest.TestCase)<block_start><def_stmt>test_tutorial_GMM_with_1_dimensions_and_4_components self<arrow><none><block_start>"""Check BM and BMG inference both terminate"""<line_sep>self.maxDiff=<none><line_sep># Inference with BM
torch.manual_seed(42)<line_sep># Note: Second time we seed. Could be a good tutorial style
mh=bm.CompositionalInference({<ellipsis>:bm.SingleSiteNewtonianMonteCarlo()})<line_sep>mh.infer(queries observations num_samples=num_samples num_chains=1 )<line_sep>self.assertTrue(<true> msg="We just want to check this point is reached")<block_end><def_stmt>test_tutorial_GMM_with_1_dimensions_and_4_components_to_dot_cpp_python self <arrow><none><block_start>self.maxDiff=<none><line_sep>observed=BMGInference().to_dot(queries observations)<line_sep>expected="""digraph "graph" {
N00[label="[5.0,5.0,5.0,5.0]"];
N01[label=Dirichlet];
N02[label=Sample];
N03[label=Categorical];
N04[label=Sample];
N05[label=0.0];
N06[label=10.0];
N07[label=Normal];
N08[label=Sample];
N09[label=Sample];
N10[label=Sample];
N11[label=Sample];
N12[label=1.0];
N13[label=Gamma];
N14[label=Sample];
N15[label=Sample];
N16[label=Sample];
N17[label=Sample];
N18[label=Choice];
N19[label=Choice];
N20[label=Normal];
N21[label=Sample];
N22[label="Observation 0.0"];
N23[label=Sample];
N24[label=Choice];
N25[label=Choice];
N26[label=Normal];
N27[label=Sample];
N28[label="Observation 1.0"];
N29[label=Sample];
N30[label=Choice];
N31[label=Choice];
N32[label=Normal];
N33[label=Sample];
N34[label="Observation 0.0"];
N35[label=Sample];
N36[label=Choice];
N37[label=Choice];
N38[label=Normal];
N39[label=Sample];
N40[label="Observation 1.0"];
N41[label=Sample];
N42[label=Choice];
N43[label=Choice];
N44[label=Normal];
N45[label=Sample];
N46[label="Observation 0.0"];
N47[label=Sample];
N48[label=Choice];
N49[label=Choice];
N50[label=Normal];
N51[label=Sample];
N52[label="Observation 1.0"];
N53[label=Query];
N54[label=Query];
N55[label=Query];
N56[label=Query];
N57[label=Query];
N58[label=Query];
N59[label=Query];
N60[label=Query];
N61[label=Query];
N62[label=Query];
N63[label=Query];
N64[label=Query];
N65[label=Query];
N66[label=Query];
N67[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N03;
N02 -> N53;
N03 -> N04;
N03 -> N23;
N03 -> N29;
N03 -> N35;
N03 -> N41;
N03 -> N47;
N04 -> N18;
N04 -> N19;
N04 -> N54;
N05 -> N07;
N06 -> N07;
N06 -> N13;
N07 -> N08;
N07 -> N09;
N07 -> N10;
N07 -> N11;
N08 -> N18;
N08 -> N24;
N08 -> N30;
N08 -> N36;
N08 -> N42;
N08 -> N48;
N08 -> N60;
N09 -> N18;
N09 -> N24;
N09 -> N30;
N09 -> N36;
N09 -> N42;
N09 -> N48;
N09 -> N61;
N10 -> N18;
N10 -> N24;
N10 -> N30;
N10 -> N36;
N10 -> N42;
N10 -> N48;
N10 -> N62;
N11 -> N18;
N11 -> N24;
N11 -> N30;
N11 -> N36;
N11 -> N42;
N11 -> N48;
N11 -> N63;
N12 -> N13;
N13 -> N14;
N13 -> N15;
N13 -> N16;
N13 -> N17;
N14 -> N19;
N14 -> N25;
N14 -> N31;
N14 -> N37;
N14 -> N43;
N14 -> N49;
N14 -> N64;
N15 -> N19;
N15 -> N25;
N15 -> N31;
N15 -> N37;
N15 -> N43;
N15 -> N49;
N15 -> N65;
N16 -> N19;
N16 -> N25;
N16 -> N31;
N16 -> N37;
N16 -> N43;
N16 -> N49;
N16 -> N66;
N17 -> N19;
N17 -> N25;
N17 -> N31;
N17 -> N37;
N17 -> N43;
N17 -> N49;
N17 -> N67;
N18 -> N20;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N23 -> N24;
N23 -> N25;
N23 -> N55;
N24 -> N26;
N25 -> N26;
N26 -> N27;
N27 -> N28;
N29 -> N30;
N29 -> N31;
N29 -> N56;
N30 -> N32;
N31 -> N32;
N32 -> N33;
N33 -> N34;
N35 -> N36;
N35 -> N37;
N35 -> N57;
N36 -> N38;
N37 -> N38;
N38 -> N39;
N39 -> N40;
N41 -> N42;
N41 -> N43;
N41 -> N58;
N42 -> N44;
N43 -> N44;
N44 -> N45;
N45 -> N46;
N47 -> N48;
N47 -> N49;
N47 -> N59;
N48 -> N50;
N49 -> N50;
N50 -> N51;
N51 -> N52;
}
"""<line_sep>self.assertEqual(expected.strip() observed.strip())<line_sep>observed=BMGInference().to_cpp(queries observations)<line_sep>expected="""graph::Graph g;
Eigen::MatrixXd m0(4, 1)
m0 << 5.0, 5.0, 5.0, 5.0;
uint n0 = g.add_constant_pos_matrix(m0);
uint n1 = g.add_distribution(
graph::DistributionType::DIRICHLET,
graph::ValueType(
graph::VariableType::COL_SIMPLEX_MATRIX,
graph::AtomicType::PROBABILITY,
4,
1
),
std::vector<uint>({n0}));
uint n2 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n3 = g.add_distribution(
graph::DistributionType::CATEGORICAL,
graph::AtomicType::NATURAL,
std::vector<uint>({n2}));
uint n4 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n5 = g.add_constant(0.0);
uint n6 = g.add_constant_pos_real(10.0);
uint n7 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n5, n6}));
uint n8 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n9 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n10 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n11 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n12 = g.add_constant_pos_real(1.0);
uint n13 = g.add_distribution(
graph::DistributionType::GAMMA,
graph::AtomicType::POS_REAL,
std::vector<uint>({n12, n6}));
uint n14 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n15 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n16 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n17 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n18 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n4, n8, n9, n10, n11}));
uint n19 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n4, n14, n15, n16, n17}));
uint n20 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n18, n19}));
uint n21 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n20}));
g.observe([n21], 0.0);
uint n22 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n23 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n22, n8, n9, n10, n11}));
uint n24 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n22, n14, n15, n16, n17}));
uint n25 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n23, n24}));
uint n26 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n25}));
g.observe([n26], 1.0);
uint n27 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n28 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n27, n8, n9, n10, n11}));
uint n29 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n27, n14, n15, n16, n17}));
uint n30 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n28, n29}));
uint n31 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n30}));
g.observe([n31], 0.0);
uint n32 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n33 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n32, n8, n9, n10, n11}));
uint n34 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n32, n14, n15, n16, n17}));
uint n35 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n33, n34}));
uint n36 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n35}));
g.observe([n36], 1.0);
uint n37 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n38 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n37, n8, n9, n10, n11}));
uint n39 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n37, n14, n15, n16, n17}));
uint n40 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n38, n39}));
uint n41 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n40}));
g.observe([n41], 0.0);
uint n42 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n43 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n42, n8, n9, n10, n11}));
uint n44 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n42, n14, n15, n16, n17}));
uint n45 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n43, n44}));
uint n46 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n45}));
g.observe([n46], 1.0);
uint q0 = g.query(n2);
uint q1 = g.query(n4);
uint q2 = g.query(n22);
uint q3 = g.query(n27);
uint q4 = g.query(n32);
uint q5 = g.query(n37);
uint q6 = g.query(n42);
uint q7 = g.query(n8);
uint q8 = g.query(n9);
uint q9 = g.query(n10);
uint q10 = g.query(n11);
uint q11 = g.query(n14);
uint q12 = g.query(n15);
uint q13 = g.query(n16);
uint q14 = g.query(n17);
"""<line_sep>self.assertEqual(expected.strip() observed.strip())<block_end><block_end>
|
# Read the number of test cases, then compute and print a! for each input value a.
<for_stmt>i range(int(input()))<block_start>fact=1<line_sep>a=int(input())<for_stmt>j range(1 a+1 1)<block_start>fact=fact<times>j<block_end>print(fact)<block_end>
|
<import_from_stmt>typing NamedTuple Dict List<import_stmt>numpy<import_stmt>pyworld<line_sep>_min_mc=-18.3<class_stmt>Wave(NamedTuple)<block_start>wave:numpy.ndarray<line_sep>sampling_rate:int<block_end><class_stmt>AcousticFeature(NamedTuple)<block_start>f0:numpy.ndarray=numpy.nan<line_sep>spectrogram:numpy.ndarray=numpy.nan<line_sep>aperiodicity:numpy.ndarray=numpy.nan<line_sep>mfcc:numpy.ndarray=numpy.nan<line_sep>voiced:numpy.ndarray=numpy.nan<line_sep>@staticmethod<def_stmt>dtypes <block_start><return>dict(f0=numpy.float32 spectrogram=numpy.float32 aperiodicity=numpy.float32 mfcc=numpy.float32 voiced=numpy.bool )<block_end><def_stmt>astype self dtype<block_start><return>AcousticFeature(f0=self.f0.astype(dtype) spectrogram=self.spectrogram.astype(dtype) aperiodicity=self.aperiodicity.astype(dtype) mfcc=self.mfcc.astype(dtype) voiced=self.voiced.astype(dtype) )<block_end><def_stmt>astype_only_float self dtype<block_start><return>AcousticFeature(f0=self.f0.astype(dtype) spectrogram=self.spectrogram.astype(dtype) aperiodicity=self.aperiodicity.astype(dtype) mfcc=self.mfcc.astype(dtype) voiced=self.voiced )<block_end><def_stmt>validate self<block_start><assert_stmt>self.f0.ndim<eq>2<assert_stmt>self.spectrogram.ndim<eq>2<assert_stmt>self.aperiodicity.ndim<eq>2<assert_stmt>self.mfcc.ndim<eq>2<assert_stmt>self.voiced.ndim<eq>2<line_sep>len_time=len(self.f0)<assert_stmt>len(self.spectrogram)<eq>len_time<assert_stmt>len(self.aperiodicity)<eq>len_time<assert_stmt>len(self.mfcc)<eq>len_time<assert_stmt>len(self.voiced)<eq>len_time<assert_stmt>self.voiced.dtype<eq>numpy.bool<block_end>@staticmethod<def_stmt>silent length:int sizes:Dict[str int] keys:List[str]<block_start>d={}<if_stmt>'f0'<in>keys<block_start>d['f0']=numpy.zeros((length sizes['f0']) dtype=AcousticFeature.dtypes()['f0'])<block_end><if_stmt>'spectrogram'<in>keys<block_start>d['spectrogram']=numpy.zeros((length sizes['spectrogram']) dtype=AcousticFeature.dtypes()['spectrogram'])<block_end><if_stmt>'aperiodicity'<in>keys<block_start>d['aperiodicity']=numpy.zeros((length sizes['aperiodicity']) dtype=AcousticFeature.dtypes()['aperiodicity'])<block_end><if_stmt>'mfcc'<in>keys<block_start>d['mfcc']=numpy.hstack((numpy.ones((length 1) dtype=AcousticFeature.dtypes()['mfcc'])<times>_min_mc numpy.zeros((length sizes['mfcc']-1) dtype=AcousticFeature.dtypes()['mfcc'])))<block_end><if_stmt>'voiced'<in>keys<block_start>d['voiced']=numpy.zeros((length sizes['voiced']) dtype=AcousticFeature.dtypes()['voiced'])<block_end>feature=AcousticFeature(**d)<line_sep><return>feature<block_end>@staticmethod<def_stmt>concatenate fs:List['AcousticFeature'] keys:List[str]<block_start>is_target=<lambda>a:<not>numpy.any(numpy.isnan(a))<line_sep><return>AcousticFeature(**{key:numpy.concatenate([getattr(f key)<for>f fs])<if>is_target(getattr(fs[0] key))<else>numpy.nan<for>key keys})<block_end><def_stmt>pick self first:int last:int<block_start>is_target=<lambda>a:<not>numpy.any(numpy.isnan(a))<line_sep><return>AcousticFeature(f0=self.f0[first:last]<if>is_target(self.f0)<else>numpy.nan spectrogram=self.spectrogram[first:last]<if>is_target(self.spectrogram)<else>numpy.nan aperiodicity=self.aperiodicity[first:last]<if>is_target(self.aperiodicity)<else>numpy.nan mfcc=self.mfcc[first:last]<if>is_target(self.mfcc)<else>numpy.nan voiced=self.voiced[first:last]<if>is_target(self.voiced)<else>numpy.nan )<block_end>@staticmethod<def_stmt>get_sizes sampling_rate:int order:int<block_start>fft_size=pyworld.get_cheaptrick_fft_size(fs=sampling_rate)<line_sep><return>dict(f0=1 
spectrogram=fft_size<floordiv>2+1 aperiodicity=fft_size<floordiv>2+1 mfcc=order+1 voiced=1 )<block_end><block_end><class_stmt>LowHighSpectrogramFeature(NamedTuple)<block_start>low:numpy.ndarray<line_sep>high:numpy.ndarray<def_stmt>validate self<block_start><assert_stmt>self.low.ndim<eq>2<assert_stmt>self.high.ndim<eq>2<assert_stmt>self.low.shape<eq>self.high.shape<block_end><block_end>
|
createElement=React.createElement<line_sep>createContext=React.createContext<line_sep>forwardRef=React.forwardRef<line_sep>Component=ReactComponent=React.Component<line_sep>useState=React.useState<line_sep>useEffect=React.useEffect<line_sep>useContext=React.useContext<line_sep>useReducer=React.useReducer<line_sep>useCallback=React.useCallback<line_sep>useMemo=React.useMemo<line_sep>useRef=React.useRef<line_sep>useImperativeHandle=React.useImperativeHandle<line_sep>useLayoutEffect=React.useLayoutEffect<line_sep>useDebugValue=React.useDebugValue<def_stmt>withDeps *deps<block_start>useHook=this<def_stmt>decorator fn<block_start>useHook(fn deps)<line_sep><return>fn<block_end><return>decorator<block_end>useEffect.withDeps=withDeps<line_sep>useLayoutEffect.withDeps=withDeps<def_stmt>useCallbackWithDeps *deps<block_start><def_stmt>decorator fn<block_start><return>React.useCallback(fn deps)<block_end><return>decorator<block_end>useCallback.withDeps=useCallbackWithDeps<line_sep>
|
<class_stmt>dotLoadCommonAttributes_t(object)# no doc
<block_start>aPartFilter=<none><line_sep>AutomaticPrimaryAxisWeight=<none><line_sep>BoundingBoxDx=<none><line_sep>BoundingBoxDy=<none><line_sep>BoundingBoxDz=<none><line_sep>CreateFixedSupportConditionsAutomatically=<none><line_sep>FatherId=<none><line_sep>LoadAttachment=<none><line_sep>LoadDispersionAngle=<none><line_sep>LoadGroupId=<none><line_sep>ModelObject=<none><line_sep>PartNames=<none><line_sep>PrimaryAxisDirection=<none><line_sep>Spanning=<none><line_sep>Weight=<none><block_end>
|
__test__=<false><def_stmt>do_import <block_start><import_stmt>encodings.idna<block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>sys<import_stmt>eventlet<line_sep>eventlet.monkey_patch()<line_sep>threading=eventlet.patcher.original('threading')<line_sep>sys.modules.pop('encodings.idna' <none>)<line_sep># call "import encodings.idna" in a new thread
thread=threading.Thread(target=do_import)<line_sep>thread.start()<line_sep># call "import encodings.idna" in the main thread
do_import()<line_sep>thread.join()<line_sep>print('pass')<block_end>
|
<import_from_stmt>allennlp.modules.token_embedders TokenEmbedder PretrainedTransformerEmbedder<import_from_stmt>allennlp.modules.scalar_mix ScalarMix<line_sep>@TokenEmbedder.register("intermediate_pretrained_transformer")<class_stmt>IntermediatePretrainedTransformerEmbedder(PretrainedTransformerEmbedder)<block_start><def_stmt>__init__ self layer_index:int **kwargs<arrow><none><block_start>super().__init__(**kwargs last_layer_only=<false>)<line_sep>initial_scalar_parameters=[-1e9<for>_ range(self.config.num_hidden_layers)]<line_sep>initial_scalar_parameters[layer_index]=0<line_sep>self._scalar_mix=ScalarMix(self.config.num_hidden_layers initial_scalar_parameters=initial_scalar_parameters trainable=<false> do_layer_norm=<false> )<block_end><block_end>
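# --- Editor's usage sketch (not part of the original module) -----------------
# Shows how the embedder above might be constructed: __init__ forces
# last_layer_only=False and freezes a one-hot ScalarMix so that only the
# requested intermediate layer contributes. The model name and layer index are
# assumptions for illustration; constructing the object downloads/loads the
# HuggingFace checkpoint.
def _example_build_intermediate_embedder():
    return IntermediatePretrainedTransformerEmbedder(
        layer_index=6, model_name="bert-base-uncased"
    )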
|
<import_from_stmt>.Provider Provider<import_from_stmt>whitenoise WhiteNoise<import_stmt>os<class_stmt>WhitenoiseProvider(Provider)<block_start><def_stmt>__init__ self application<block_start>self.application=application<block_end><def_stmt>register self<block_start>response_handler=WhiteNoise(self.application.get_response_handler() root=self.application.get_storage_path() autorefresh=<true> )<for_stmt>location,alias (self.application.make("storage_capsule").get_storage_assets().items())<block_start>response_handler.add_files(location prefix=alias)<block_end>self.application.set_response_handler(response_handler)<block_end><def_stmt>boot self<block_start><return><block_end><block_end>
|
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>framework.configbase<import_stmt>framework.ops<line_sep>'''
Vanilla Encoder: embed nd array (batch_size, ..., dim_ft)
- EncoderConfig
- Encoder
Multilayer Perceptrons: feed forward networks + softmax
- MLPConfig
- MLP
'''<class_stmt>EncoderConfig(framework.configbase.ModuleConfig)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.dim_fts=[2048]<line_sep>self.dim_embed=512<line_sep>self.is_embed=<true><line_sep>self.dropout=0<line_sep>self.norm=<false><line_sep>self.nonlinear=<false><block_end><def_stmt>_assert self<block_start><if_stmt><not>self.is_embed<block_start><assert_stmt>self.dim_embed<eq>sum(self.dim_fts)<block_end><block_end><block_end><class_stmt>Encoder(nn.Module)<block_start><def_stmt>__init__ self config<block_start>super().__init__()<line_sep>self.config=config<if_stmt>self.config.is_embed<block_start>self.ft_embed=nn.Linear(sum(self.config.dim_fts) self.config.dim_embed)<block_end>self.dropout=nn.Dropout(self.config.dropout)<block_end><def_stmt>forward self fts<block_start>'''
Args:
fts: size=(batch, ..., sum(dim_fts))
Returns:
embeds: size=(batch, dim_embed)
'''<line_sep>embeds=fts<if_stmt>self.config.is_embed<block_start>embeds=self.ft_embed(embeds)<block_end><if_stmt>self.config.nonlinear<block_start>embeds=F.relu(embeds)<block_end><if_stmt>self.config.norm<block_start>embeds=framework.ops.l2norm(embeds)<block_end>embeds=self.dropout(embeds)<line_sep><return>embeds<block_end><block_end>
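# --- Editor's usage sketch (not part of the original module) -----------------
# A minimal end-to-end pass through the Encoder above: build a config, embed a
# random feature batch, and check the output width. The batch size is an
# assumption for illustration, and the sketch relies on the same `framework`
# package the module itself imports.
if __name__ == '__main__':
    cfg = EncoderConfig()            # defaults: dim_fts=[2048], dim_embed=512
    encoder = Encoder(cfg)
    fts = torch.randn(4, sum(cfg.dim_fts))   # (batch, sum(dim_fts))
    embeds = encoder(fts)
    print(embeds.shape)                      # expected: torch.Size([4, 512])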
|
<import_stmt>numpy<as>np<import_stmt>input_data<import_from_stmt>sklearn.utils shuffle<line_sep>np.random.seed(9999)<line_sep>mnist=input_data.read_data_sets('../MNIST_data' one_hot=<true>)<line_sep>X_train=mnist.train.images<line_sep>t_train=mnist.train.labels<line_sep>X_test=mnist.test.images<line_sep>t_test=mnist.test.labels<line_sep>X_train,t_train=shuffle(X_train t_train)<line_sep># Model
W1=np.random.randn(784 100)<times>0.01<line_sep>W2=np.random.randn(100 10)<times>0.01<def_stmt>softmax x<block_start>ex=np.exp(x-np.max(x axis=1)[: <none>])<line_sep><return>ex/ex.sum(axis=1)[: <none>]<block_end><def_stmt>NLL z t<block_start><return>-np.mean(np.sum(t<times>np.log(softmax(z)+eps) axis=1))<block_end>m=200# mb size
alpha=0.001<line_sep>rho1=0.9# Momentum, i.e. decay for the first moment s
rho2=0.999# Decay for F, i.e. the second moment r (diagonal FIM approximation)
s1=np.zeros_like(W1)<line_sep>r1=np.zeros_like(W1)<line_sep>s2=np.zeros_like(W2)<line_sep>r2=np.zeros_like(W2)<line_sep>eps=1e-8<line_sep># Visualization stuffs
losses=[]<line_sep># Training
<for_stmt>i range(1 5000)<block_start>X_mb,t_mb=mnist.train.next_batch(m)<line_sep>t_mb_idx=t_mb.argmax(axis=1)<line_sep># Forward
a=X_mb@W1<line_sep>h=np.maximum(a 0)<line_sep>z=h@W2<line_sep>loss=NLL(z t_mb)<line_sep># Loss
<if_stmt>(i-1)%100<eq>0<block_start>print(f'Iter-{i}; Loss: {loss:.3f}')<block_end>losses.append(loss<if>i<eq>1<else>0.99<times>losses[-1]+0.01<times>loss)<line_sep>m=z.shape[0]<line_sep># Gradients
dz=softmax(z)<line_sep>dz[range(dz.shape[0]) t_mb_idx]<augsub>1# m*10
dz<augdiv>m<line_sep>dW2=h.T@dz# 100*10
[email protected]# m*100
dh[a<l>0]=0# ReLU
dW1=X_mb.T@dh# 784*100
# Moments
s1=rho1<times>s1+(1-rho1)<times>dW1<line_sep>r1=rho2<times>r1+(1-rho2)<times>(dW1<times>dW1)<line_sep>s2=rho1<times>s2+(1-rho1)<times>dW2<line_sep>r2=rho2<times>r2+(1-rho2)<times>(dW2<times>dW2)<line_sep># r = rho2*r + (1-rho2)*(m*g*g) # Corresponds to diagonal approx. of FIM
# Bias correction
s1_=s1/(1-rho1<power>i)<line_sep>r1_=r1/(1-rho2<power>i)<line_sep>s2_=s2/(1-rho1<power>i)<line_sep>r2_=r2/(1-rho2<power>i)<line_sep># Step
delta1=s1_/(np.sqrt(r1_)+eps)<line_sep>delta2=s2_/(np.sqrt(r2_)+eps)<line_sep># delta = s_ / (r_ + eps) # Inverse of diagonal FIM
# W = W - alpha * g # SGD update
W1=W1-alpha<times>delta1<line_sep>W2=W2-alpha<times>delta2<block_end>y=softmax(np.maximum(X_test@W1 0)@W2).argmax(axis=1)<line_sep>acc=np.mean(y<eq>t_test.argmax(axis=1))<line_sep>print(f'Accuracy: {acc:.3f}')<line_sep>np.save('adam_losses.npy' losses)<line_sep>
|
add_library('opencv_processing')<line_sep>img=<none><line_sep>opencv=<none><def_stmt>setup # rebind the module-level img/opencv; without this, draw() would still see None
<block_start><global_stmt>img opencv<line_sep>img=loadImage("test.jpg")<line_sep>size(img.width img.height P2D)<line_sep>opencv=OpenCV(this img)<block_end><def_stmt>draw <block_start>opencv.loadImage(img)<line_sep>opencv.brightness(int(map(mouseX 0 width -255 255)))<line_sep>image(opencv.getOutput() 0 0)<block_end>
|
<import_from_stmt>recon.core.module BaseModule<class_stmt>Module(BaseModule)<block_start>meta={'name':'Reverse Geocoder' 'author':'<NAME> (<EMAIL>)' 'description':'Queries the Google Maps API to obtain an address from coordinates.' 'query':'SELECT DISTINCT latitude || \',\' || longitude FROM locations WHERE latitude IS NOT NULL AND longitude IS NOT NULL' }<def_stmt>module_run self points<block_start><for_stmt>point points<block_start>self.verbose("Reverse geocoding (%s)..."%(point))<line_sep>payload={'latlng':point 'sensor':'false'}<line_sep>url='https://maps.googleapis.com/maps/api/geocode/json'<line_sep>resp=self.request(url payload=payload)<line_sep># kill the module if nothing is returned
<if_stmt>len(resp.json['results'])<eq>0<block_start>self.output('Unable to resolve an address for (%s).'%(point))<line_sep><return><block_end># loop through the results
found=<false><for_stmt>result resp.json['results']<block_start><if_stmt>result['geometry']['location_type']<eq>'ROOFTOP'<block_start>found=<true><line_sep>lat=point.split(',')[0]<line_sep>lon=point.split(',')[1]<line_sep>address=result['formatted_address']<line_sep># store the result
self.add_locations(lat lon address)<block_end><block_end><if_stmt>found<block_start>self.query('DELETE FROM locations WHERE latitude=? AND longitude=? AND street_address IS NULL' (lat lon))<block_end><block_end><block_end><block_end>
|
<import_from_stmt>django.apps AppConfig<class_stmt>RecommendationConfig(AppConfig)<block_start>name='recommendation'<line_sep># Chinese display name shown in the admin backend ("推荐管理" = "Recommendation Management")
verbose_name="推荐管理"<block_end>
|
"""
Acquisition functions.
"""<line_sep># pylint: disable=wildcard-import
<import_from_stmt>.simple *<import_from_stmt>. simple<line_sep>__all__=[]<line_sep>__all__<augadd>simple.__all__<line_sep>
|
<import_from_future_stmt> absolute_import print_function<import_stmt>asyncio<import_from_stmt>..otel_ot_shim_tracer MockTracer<import_from_stmt>..testcase OpenTelemetryTestCase<class_stmt>TestAsyncio(OpenTelemetryTestCase)<block_start><def_stmt>setUp self<block_start>self.tracer=MockTracer()<line_sep>self.loop=asyncio.get_event_loop()<block_end><def_stmt>test_main self<block_start>res=self.loop.run_until_complete(self.parent_task("message"))<line_sep>self.assertEqual(res "message::response")<line_sep>spans=self.tracer.finished_spans()<line_sep>self.assertEqual(len(spans) 2)<line_sep>self.assertNamesEqual(spans ["child" "parent"])<line_sep>self.assertIsChildOf(spans[0] spans[1])<block_end><async_keyword><def_stmt>parent_task self message# noqa
<block_start><with_stmt>self.tracer.start_active_span("parent")<block_start>res=<await>self.child_task(message)<block_end><return>res<block_end><async_keyword><def_stmt>child_task self message# No need to pass/activate the parent Span, as it stays in the context.
<block_start><with_stmt>self.tracer.start_active_span("child")<block_start><return>f"{message}::response"<block_end><block_end><block_end>
|
<class_stmt>Publisher<block_start><def_stmt>__init__ self<block_start>self.observers=[]<block_end><def_stmt>add self observer<block_start><if_stmt>observer<not><in>self.observers<block_start>self.observers.append(observer)<block_end><else_stmt><block_start>print(f'Failed to add: {observer}')<block_end><block_end><def_stmt>remove self observer<block_start><try_stmt><block_start>self.observers.remove(observer)<block_end><except_stmt>ValueError<block_start>print(f'Failed to remove: {observer}')<block_end><block_end><def_stmt>notify self<block_start>[o.notify(self)<for>o self.observers]<block_end><block_end><class_stmt>DefaultFormatter(Publisher)<block_start><def_stmt>__init__ self name<block_start>Publisher.__init__(self)<line_sep>self.name=name<line_sep>self._data=0<block_end><def_stmt>__str__ self<block_start><return>f"{type(self).__name__}: '{self.name}' has data = {self._data}"<block_end>@property<def_stmt>data self<block_start><return>self._data<block_end>@data.setter<def_stmt>data self new_value<block_start><try_stmt><block_start>self._data=int(new_value)<block_end><except_stmt>ValueError<as>e<block_start>print(f'Error: {e}')<block_end><else_stmt><block_start>self.notify()<block_end><block_end><block_end><class_stmt>HexFormatterObs<block_start><def_stmt>notify self publisher<block_start>value=hex(publisher.data)<line_sep>print(f"{type(self).__name__}: '{publisher.name}' has now hex data = {value}")<block_end><block_end><class_stmt>BinaryFormatterObs<block_start><def_stmt>notify self publisher<block_start>value=bin(publisher.data)<line_sep>print(f"{type(self).__name__}: '{publisher.name}' has now bin data = {value}")<block_end><block_end><def_stmt>main <block_start>df=DefaultFormatter('test1')<line_sep>print(df)<line_sep>print()<line_sep>hf=HexFormatterObs()<line_sep>df.add(hf)<line_sep>df.data=3<line_sep>print(df)<line_sep>print()<line_sep>bf=BinaryFormatterObs()<line_sep>df.add(bf)<line_sep>df.data=21<line_sep>print(df)<line_sep>print()<line_sep>df.remove(hf)<line_sep>df.data=40<line_sep>print(df)<line_sep>print()<line_sep>df.remove(hf)<line_sep>df.add(bf)<line_sep>df.data='hello'<line_sep>print(df)<line_sep>print()<line_sep>df.data=15.8<line_sep>print(df)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
|
'''OpenGL extension OES.required_internalformat
This module customises the behaviour of the
OpenGL.raw.GLES1.OES.required_internalformat to provide a more
Python-friendly API
Overview (from the spec)
The ES 1.1 API allows an implementation to store texture data internally
with arbitrary precision, regardless of the format and type of the data
supplied by the application. Similarly, ES allows an implementation to
choose an arbitrary precision for the internal storage of image data
allocated by glRenderbufferStorageOES.
While this allows flexibility for implementations, it does mean that an
application does not have a reliable means to request the implementation
maintain a specific precision or to find out what precision the
implementation will maintain for a given texture or renderbuffer image.
For reference, "Desktop" OpenGL uses the <internalformat> argument to
glTexImage*, glCopyTexImage* and glRenderbufferStorageEXT as a hint,
defining the particular base format and precision that the application wants
the implementation to maintain when storing the image data. Further, the
application can choose an <internalformat> with a different base internal
format than the source format specified by <format>. The implementation is
not required to exactly match the precision specified by <internalformat>
when choosing an internal storage precision, but it is required to match the
base internal format of <internalformat>.
In addition, ES 1.1 does not allow an implementation to fail a request to
glTexImage2D for any of the legal <format> and <type> combinations listed in
Table 3.4, even if the implementation does not natively support data stored
in that external <format> and <type>. However, there are no additional
requirements placed on the implementation. The ES implementation is free to
store the texture data with lower precision than originally specified, for
instance. Further, since ES removes the ability to query the texture object
to find out what internal format it chose, there is no way for the
application to find out that this has happened.
This extension addresses the situation in two ways:
1) This extension introduces the ability for an application to specify
the desired "sized" internal formats for texture image allocation.
2) This extension guarantees to maintain at least the specified
precision of all available sized internal formats.
An implementation that exports this extension is committing to support all
of the legal values for <internalformat> in Tables 3.4, 3.4.x, and 3.4.y,
subject to the extension dependencies described herein. That is to say, the
implementation is guaranteeing that choosing an <internalformat> argument
with a value from these tables will not cause an image allocation request to
fail. Furthermore, it is guaranteeing that for any sized internal format,
the renderbuffer or texture data will be stored with at least the precision
prescribed by the sized internal format.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/required_internalformat.txt
'''<import_from_stmt>OpenGL platform constant arrays<import_from_stmt>OpenGL extensions wrapper<import_stmt>ctypes<import_from_stmt>OpenGL.raw.GLES1 _types _glgets<import_from_stmt>OpenGL.raw.GLES1.OES.required_internalformat *<import_from_stmt>OpenGL.raw.GLES1.OES.required_internalformat _EXTENSION_NAME<def_stmt>glInitRequiredInternalformatOES <block_start>'''Return boolean indicating whether this extension is available'''<import_from_stmt>OpenGL extensions<line_sep><return>extensions.hasGLExtension(_EXTENSION_NAME)<block_end>### END AUTOGENERATED SECTION
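# --- Editor's usage sketch (not part of the autogenerated binding) -----------
# A hedged example of how an application might gate its use of sized internal
# formats on this extension. It assumes a current GLES1 context has already
# been created by the application's windowing code before the check is made.
def _example_supports_sized_internalformats():
    """Return True when GL_OES_required_internalformat can be relied upon."""
    return bool(glInitRequiredInternalformatOES())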
|
<import_from_future_stmt> absolute_import division print_function<import_stmt>psana<import_from_stmt>xfel.lcls_api.psana_cctbx CctbxPsanaEventProcessor<def_stmt>simple_example experiment run_number detector_address params_file event_num<block_start>""" Demo using the cctbx/lcls api
@param experiment LCLS experiment string
@param run_number Run number
@param detector_address psana detector name or address string (passed to psana.Detector)
@param params_file cctbx/DIALS parameter file for processing
@param event_num Index for specific event to process
"""<line_sep>output_tag='%s_run%d'%(experiment run_number)<line_sep>print("Getting datasource")<line_sep>ds=psana.DataSource('exp=%s:run=%d'%(experiment run_number))<line_sep>processor=CctbxPsanaEventProcessor(params_file output_tag logfile=output_tag+".log")<for_stmt>run ds.runs()<block_start>print("Getting detector")<line_sep>det=psana.Detector(detector_address)<line_sep>processor.setup_run(run det)<for_stmt>event_id,event enumerate(ds.events())<block_start>print(event_id)<if_stmt>event_num<is><not><none><and>event_id<ne>event_num<block_start><continue><block_end>processor.process_event(event str(event_id))<line_sep><break><block_end><break><block_end>processor.finalize()<block_end><def_stmt>full_api_example experiment run_number detector_address params_file event_num<block_start>""" Demo using the cctbx/lcls api
@param experiment LCLS experiment string
@param run_number Run number
@param detector_address psana detector name or address string (passed to psana.Detector)
@param params_file cctbx/DIALS parameter file for processing
@param event_num Index for specific event to process
"""<line_sep>output_tag='%s_run%d'%(experiment run_number)<line_sep>print("Getting datasource")<line_sep>ds=psana.DataSource('exp=%s:run=%d'%(experiment run_number))<line_sep>processor=CctbxPsanaEventProcessor(params_file output_tag)# note, logfile already initialized in this demo, so don't do it twice
<for_stmt>run ds.runs()<block_start>print("Getting detector")<line_sep>det=psana.Detector(detector_address)<line_sep>processor.setup_run(run det)<for_stmt>event_id,event enumerate(ds.events())<block_start>print(event_id)<if_stmt>event_num<is><not><none><and>event_id<ne>event_num<block_start><continue><block_end>tag='%s_%s'%(output_tag str(event_id))<line_sep>experiments=processor.experiments_from_event(event)<line_sep>processor.tag=tag<line_sep>processor.setup_filenames(tag)<try_stmt><block_start>processor.pre_process(experiments)<line_sep>observed=processor.find_spots(experiments)<line_sep>experiments,indexed=processor.index(experiments observed)<line_sep>experiments,indexed=processor.refine(experiments indexed)<line_sep>integrated=processor.integrate(experiments indexed)<line_sep>print("Integrated %d spots on %d lattices"%(len(integrated) len(experiments)))<block_end><except_stmt>Exception<as>e<block_start>print("Couldn't process event %d"%event_id str(e))<block_end><break><block_end><break><block_end>processor.finalize()<block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>sys<line_sep>experiment,run_number,detector_address,params_file,event_num=sys.argv[1:6]<line_sep>simple_example(experiment int(run_number) detector_address params_file int(event_num))<line_sep>full_api_example(experiment int(run_number) detector_address params_file int(event_num))<block_end>
|
# Generated by Django 2.2.11 on 2021-06-12 14:33
<import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<import_stmt>uuid<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[migrations.swappable_dependency(settings.AUTH_USER_MODEL) ('facility' '0253_auto_20210612_1256') ]<line_sep>operations=[migrations.CreateModel(name='PatientNotes' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('external_id' models.UUIDField(db_index=<true> default=uuid.uuid4 unique=<true>)) ('created_date' models.DateTimeField(auto_now_add=<true> db_index=<true> null=<true>)) ('modified_date' models.DateTimeField(auto_now=<true> db_index=<true> null=<true>)) ('deleted' models.BooleanField(db_index=<true> default=<false>)) ('note' models.TextField(blank=<true> default='')) ('created_by' models.ForeignKey(null=<true> on_delete=django.db.models.deletion.SET_NULL to=settings.AUTH_USER_MODEL)) ('facility' models.ForeignKey(on_delete=django.db.models.deletion.PROTECT to='facility.Facility')) ('patient' models.ForeignKey(on_delete=django.db.models.deletion.PROTECT to='facility.PatientRegistration')) ] options={'abstract':<false> } ) ]<block_end>
|
<import_stmt>pandas<as>pds<import_stmt>numpy<as>np<import_from_stmt>datetime datetime<import_stmt>torch<def_stmt>numpy_fill arr<block_start>mask=np.isnan(arr)<line_sep>idx=np.where(~mask np.arange(mask.shape[1]) 0)<line_sep>np.maximum.accumulate(idx axis=1 out=idx)<line_sep>out=arr[np.arange(idx.shape[0])[: <none>] idx]<line_sep><return>out<block_end><def_stmt>get_intervention country standarize=<false> smooth=<true> legacy=<false><block_start>csvs=['c1_schoolclosing.csv' 'c2_workplaceclosing.csv' 'c3_cancelpublicevents.csv' 'c4_restrictionsongatherings.csv' 'c5_closepublictransport.csv' 'c6_stayathomerequirements.csv' 'c7_domestictravel.csv' 'c8_internationaltravel.csv' 'e1_incomesupport.csv' 'e2_debtcontractrelief.csv' 'h1_publicinfocampaign.csv' 'h2_testingpolicy.csv']+['c{}_flag.csv'.format(x)<for>x range(1 8)]+['e1_flag.csv' 'h1_flag.csv']<if_stmt><not>legacy<block_start>files=['ox-policy-tracker/data/timeseries/{}'.format(i)<for>i csvs]<block_end><else_stmt><block_start>files=['covid-policy-tracker-legacy/data/timeseries/{}'.format(i)<for>i csvs]<block_end>idx_list=[]<for_stmt>f files<block_start>dat_ox=pds.read_csv(f)<line_sep>dat_ox.rename(columns={'Unnamed: 0':'country' 'Unnamed: 1':'country_code'} inplace=<true>)<line_sep>dat_ox[dat_ox<eq>'.']='NaN'<line_sep>dt_list=[datetime.strptime(x '%d%b%Y').date()<for>x dat_ox.columns[2:]]<line_sep>dat_country=dat_ox[dat_ox['country']<eq>country]<line_sep>index_country=dat_country.iloc[0 2:].values.astype(np.float)<line_sep># fill na with previous value
index_country=numpy_fill(index_country[<none> :])<line_sep># handle the case of initial zeros
index_country[np.isnan(index_country)]=0<line_sep>idx_list.append(index_country[0 :])<block_end>idx=np.stack(idx_list -1)<if_stmt>standarize<block_start>idx=(idx-np.mean(idx axis=0))/np.std(idx axis=0)<line_sep>idx[np.isnan(idx)]=0<block_end><if_stmt>smooth<block_start>dy_list=list()<for_stmt>i range(idx.shape[1])<block_start>ds=idx[: i]<line_sep>dy=smooth_curve_1d(ds)<line_sep>dy_list.append(dy)<block_end>idx=np.stack(dy_list axis=-1)<block_end><return>idx<block_end><def_stmt>smooth_curve_1d x<block_start>w=np.ones(7 'd')<line_sep>y=np.convolve(w/w.sum() x mode='valid')<line_sep>y=np.concatenate([np.zeros(3) y])<line_sep><return>y<block_end><def_stmt>get_deaths country to_torch=<false> legacy=<false> smart_start=<true> pad=0 rebuttal=<false># get time series
<block_start><if_stmt><not>legacy<block_start>file='ts-data/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'<block_end><else_stmt><block_start>file='COVID-19-legacy/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'<block_end><if_stmt>rebuttal<block_start>file='COVID-19-rebuttal-08-10/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'<block_end>dat=pds.read_csv(file)<line_sep>dt_list=[datetime.strptime(x '%m/%d/%y').date()<for>x dat.columns[4:]]<if_stmt>country<not><in>['China' 'Canada']<block_start>country_data=dat[(dat['Country/Region']<eq>country)&(dat['Province/State'].isnull())].iloc[0 4:].values<block_end><else_stmt><block_start>country_data=np.sum(dat[(dat['Country/Region']<eq>country)].iloc[: 4:].values axis=0)<block_end>ind=(country_data<ne>0).argmax()-pad<if_stmt>ind<l>0<block_start>print(country)<line_sep>ind=0<block_end># assert ind >= 0
cum_deaths=country_data[ind:].astype(np.float64)<line_sep>dt_list=dt_list[ind:]<line_sep>daily_deaths=np.diff(np.append(np.zeros(1) cum_deaths))<if_stmt>country<eq>'Philippines'<block_start>cum_deaths=cum_deaths[39:]<line_sep>dt_list=dt_list[39:]<line_sep>daily_deaths=daily_deaths[39:]<block_end><if_stmt>country<eq>'France'<block_start>cum_deaths=cum_deaths[17:]<line_sep>dt_list=dt_list[17:]<line_sep>daily_deaths=daily_deaths[17:]<block_end># get population
dat_feat=pds.read_csv('country_feature/country_feats.csv')<if_stmt>country<eq>'US'<block_start>p_country='United States'<block_end><elif_stmt>country<eq>'Korea, South'<block_start>p_country='Korea, Rep.'<block_end><elif_stmt>country<eq>'Iran'<block_start>p_country='Iran, Islamic Rep.'<block_end><elif_stmt>country<eq>'Russia'<block_start>p_country='Russian Federation'<block_end><elif_stmt>country<eq>'Egypt'<block_start>p_country='Egypt, Arab Rep.'<block_end><else_stmt><block_start>p_country=country<block_end>population=dat_feat[(dat_feat['Country.Name']<eq>p_country)&(dat_feat['metric']<eq>'Population, total')]<line_sep>population=population['value'].values[0]<line_sep># define the starting point
<if_stmt>smart_start<block_start>rate=3.061029261722505e-08<line_sep>daily_death_min=rate<times>population<line_sep>ind_death=((daily_deaths<ge>daily_death_min)<times>.1).argmax()<line_sep>cum_deaths=cum_deaths[ind_death:]<line_sep>dt_list=dt_list[ind_death:]<line_sep>daily_deaths=daily_deaths[ind_death:]<block_end># get oxford index
<if_stmt><not>legacy<block_start>dat_ox=pds.read_csv('ox-policy-tracker/data/timeseries/stringencyindex_legacy.csv')<block_end><else_stmt><block_start>dat_ox=pds.read_csv('covid-policy-tracker-legacy/data/timeseries/stringencyindex_legacy.csv')<block_end>dat_ox.rename(columns={'Unnamed: 0':'country' 'Unnamed: 1':'country_code'} inplace=<true>)<line_sep>dt_list_ind=[datetime.strptime(x '%d%b%Y').date()<for>x dat_ox.columns[2:]]<line_sep>dat_ox[dat_ox<eq>'.']='NaN'<if_stmt>country<eq>'US'<block_start>o_country='United States'<block_end><elif_stmt>country<eq>'Korea, South'<block_start>o_country='South Korea'<block_end><else_stmt><block_start>o_country=country<block_end>dat_country=dat_ox[dat_ox['country']<eq>o_country]<line_sep># 7d mv smooth
index_country=dat_country.iloc[0 2:].values.astype(np.float)<line_sep>ind_len=len(index_country)<line_sep>index_country=smooth_curve_1d(index_country)[:ind_len]<line_sep>index_country[np.isnan(index_country)]=np.nanmean(index_country)<line_sep>intervention=get_intervention(o_country legacy=legacy)<if_stmt><not>to_torch<block_start><return>{'dt':dt_list 'cum_death':cum_deaths 'daily_death':daily_deaths 'population':population 's_index_dt':dt_list_ind 's_index':index_country 'intervention':intervention}<block_end><else_stmt><block_start><return>{'dt':dt_list 'cum_death':torch.tensor(cum_deaths) 'daily_death':torch.tensor(daily_deaths) 'population':population 's_index_dt':dt_list_ind 's_index':torch.tensor(index_country) 'intervention':torch.tensor(intervention)}<block_end><block_end>
# in sequences are same and fetching those from sequences[0]
<block_start>max_size=sequences[0].size()<line_sep>trailing_dims=max_size[1:]<line_sep>max_len=max([s.size(0)<for>s sequences])<line_sep>out_dims=(max_len len(sequences))+trailing_dims<line_sep>out_tensor=sequences[0].data.new(*out_dims).fill_(padding_value)<for_stmt>i,tensor enumerate(sequences)<block_start>length=tensor.size(0)<line_sep># use index notation to prevent duplicate references to the tensor
out_tensor[-length: i <ellipsis>]=tensor<block_end><return>out_tensor<block_end><def_stmt>cut_s_index data_dict<block_start>ind=data_dict['s_index_dt'].index(data_dict['dt'][0])<line_sep>s_len=len(data_dict['cum_death'])<line_sep>s_index=data_dict['s_index'][ind:ind+s_len]<line_sep>intervention=data_dict['intervention'][ind:ind+s_len]<line_sep><return>s_index intervention<block_end><def_stmt>get_data_pyro countries legacy=<false> smart_start=<true> pad=0 rebuttal=<false><block_start>data_list=[get_deaths(x <true> legacy smart_start pad rebuttal)<for>x countries]<line_sep>init_days=[x['dt'][0]<for>x data_list]<line_sep>init_day=min(init_days)<line_sep>t_first_blood=[(x-init_day).days<for>x init_days]<line_sep>cum_death=pad_sequence_trailing([x['cum_death']<for>x data_list])<line_sep>daily_death=pad_sequence_trailing([x['daily_death']<for>x data_list])<line_sep>si_cut=[cut_s_index(x)<for>x data_list]<line_sep>s_index=pad_sequence_trailing([x[0]<for>x si_cut])/100<line_sep>i_index=pad_sequence_trailing([x[1]<for>x si_cut])<line_sep>N_list=[x['population']<for>x data_list]<line_sep>date_list=pds.date_range(init_day periods=cum_death.size(0))<line_sep>country_feat=get_country_feature(countries)<line_sep>feat_list=['Mortality from CVD, cancer, diabetes or CRD between exact ages 30 and 70 (%)' 'Mortality rate, adult, male (per 1,000 male adults)' 'Mortality rate attributed to household and ambient air pollution, age-standardized (per 100,000 population)' 'Incidence of tuberculosis (per 100,000 people)' 'Immunization, measles (% of children ages 12-23 months)' 'Immunization, DPT (% of children ages 12-23 months)' 'Immunization, HepB3 (% of one-year-old children)' 'Cause of death, by communicable diseases and maternal, prenatal and nutrition conditions (% of total)' 'Prevalence of overweight (% of adults)']<line_sep>country_feat=country_feat[country_feat.metric.isin(feat_list)]<line_sep>dat_feat=country_feat.pivot('country' 'metric' 'value')<line_sep>feat=np.zeros_like(dat_feat.values)<for_stmt>i range(len(countries))<block_start>feat[i]=dat_feat.loc[countries[i]].values<block_end>feat=(feat-np.nanmean(feat axis=0))/np.nanstd(feat axis=0)<line_sep>feat[np.isnan(feat)]=0.<line_sep><return>{'cum_death':cum_death 'daily_death':daily_death 's_index':s_index 'i_index':i_index 'population':N_list 't_init':torch.tensor(t_first_blood).unsqueeze(-1) 'date_list':date_list 'countries':countries 'country_feat':torch.tensor(feat).to(i_index)}<block_end><def_stmt>get_country_feature country_list<block_start>dat_feat=pds.read_csv('country_feature/country_feats.csv')<line_sep>p_country_list=[]<for_stmt>country country_list<block_start><if_stmt>country<eq>'US'<block_start>p_country='United States'<block_end><elif_stmt>country<eq>'Korea, South'<block_start>p_country='Korea, Rep.'<block_end><elif_stmt>country<eq>'Iran'<block_start>p_country='Iran, Islamic Rep.'<block_end><elif_stmt>country<eq>'Russia'<block_start>p_country='Russian Federation'<block_end><elif_stmt>country<eq>'Egypt'<block_start>p_country='Egypt, Arab Rep.'<block_end><else_stmt><block_start>p_country=country<block_end>p_country_list.append(p_country)<block_end>dat_feat=dat_feat[(dat_feat['Country.Name'].isin(p_country_list))]<del_stmt>dat_feat['Country.Code']<line_sep>dat_feat['country']=dat_feat['Country.Name']<del_stmt>dat_feat['Country.Name']<line_sep>countries=dat_feat['country'].values<line_sep>countries[countries<eq>'United States']='US'<line_sep>countries[countries<eq>'Korea, Rep.']='Korea, South'<line_sep>countries[countries<eq>'Iran, Islamic 
Rep.']='Iran'<line_sep>countries[countries<eq>'Russian Federation']='Russia'<line_sep>countries[countries<eq>'Egypt, Arab Rep.']='Egypt'<line_sep>dat_feat['country']=list(countries)<line_sep><return>dat_feat<block_end>
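# --- Editor's example driver (not part of the original module) ---------------
# Builds the model-ready batch for a few countries. This is a sketch: it
# assumes the data directories referenced above (ts-data/, ox-policy-tracker/,
# country_feature/) are present relative to the working directory, and that the
# country names follow the Johns Hopkins CSV conventions used in get_deaths().
if __name__ == '__main__':
    batch = get_data_pyro(['Italy', 'Germany', 'Spain'])
    print(batch['cum_death'].shape)          # (time, num_countries)
    print(batch['countries'], batch['population'])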
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gan.exponential_moving_average."""<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow_graphics.projects.gan exponential_moving_average<class_stmt>ExponentialMovingAverageTest(tf.test.TestCase)<block_start><def_stmt>test_decay_one_values_are_from_initialization self<block_start>ema=exponential_moving_average.ExponentialMovingAverage(decay=1.0)<line_sep>initial_value=2.0<line_sep>variable=tf.Variable(initial_value)<line_sep>ema.apply((variable ))<line_sep>variable.assign(3.0)<line_sep>ema.apply((variable ))<line_sep>self.assertAllClose(ema.averaged_variables[0] initial_value)<block_end><def_stmt>test_decay_zero_returns_last_value self<block_start>ema=exponential_moving_average.ExponentialMovingAverage(decay=0.0)<line_sep>final_value=3.0<line_sep>variable=tf.Variable(2.0)<line_sep>ema.apply((variable ))<line_sep>variable.assign(final_value)<line_sep>ema.apply((variable ))<line_sep>self.assertAllClose(ema.averaged_variables[0] final_value)<block_end><def_stmt>test_cross_replica_context_raises_error self<block_start>ema=exponential_moving_average.ExponentialMovingAverage(decay=0.0)<with_stmt>self.assertRaisesRegex(NotImplementedError 'Cross-replica context version not implemented.')<block_start><with_stmt>tf.distribute.MirroredStrategy().scope()<block_start>variable=tf.Variable(2.0)<line_sep>ema.apply((variable ))<block_end><block_end><block_end><def_stmt>test_mirrored_strategy_replica_context_runs self<block_start>ema=exponential_moving_average.ExponentialMovingAverage(decay=0.5)<line_sep>strategy=tf.distribute.MirroredStrategy()<def_stmt>apply_to_ema variable<block_start>ema.apply((variable ))<block_end><with_stmt>strategy.scope()<block_start>variable=tf.Variable(2.0)<line_sep>strategy.run(apply_to_ema (variable ))<block_end>self.assertAllClose(ema.averaged_variables[0] variable.read_value())<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
|
<import_stmt>torch<import_stmt>math<import_from_stmt>torch nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>vision3d.ops sigmoid_focal_loss batched_nms_rotated<import_from_stmt>vision3d.core.box_encode decode<class_stmt>ProposalLayer(nn.Module)<block_start>"""
Use BEV feature map to generate 3D box proposals.
TODO: Fix long variable names, ugly line wraps.
"""<def_stmt>__init__ self cfg<block_start>super(ProposalLayer self).__init__()<line_sep>self.cfg=cfg<line_sep>self.conv_cls=nn.Conv2d(cfg.PROPOSAL.C_IN cfg.NUM_CLASSES<times>cfg.NUM_YAW 1)<line_sep>self.conv_reg=nn.Conv2d(cfg.PROPOSAL.C_IN cfg.NUM_CLASSES<times>cfg.NUM_YAW<times>cfg.BOX_DOF 1)<line_sep>self.TOPK,self.DOF=cfg.PROPOSAL.TOPK cfg.BOX_DOF<line_sep>self._init_weights()<block_end><def_stmt>_init_weights self<block_start>nn.init.constant_(self.conv_cls.bias (-math.log(1-.01)/.01))<line_sep>nn.init.constant_(self.conv_reg.bias 0)<for_stmt>m (self.conv_cls.weight self.conv_reg.weight)<block_start>nn.init.normal_(m std=0.01)<block_end><block_end><def_stmt>_generate_group_idx self B n_cls<block_start>"""Compute unique group_idx based on (batch_idx, class_idx) tuples."""<line_sep>batch_idx=torch.arange(B)[: <none>].expand(-1 n_cls)<line_sep>class_idx=torch.arange(n_cls)[<none> :].expand(B -1)<line_sep>group_idx=class_idx+n_cls<times>batch_idx<line_sep>b,c,g=[x[<ellipsis> <none>].expand(-1 -1 self.TOPK).reshape(-1)<for>x (batch_idx class_idx group_idx)]<line_sep><return>b c g<block_end><def_stmt>_above_score_thresh self scores class_idx<block_start>"""Classes may have different score thresholds."""<line_sep>thresh=scores.new_tensor([a['score_thresh']<for>a self.cfg.ANCHORS])<line_sep>mask=scores<g>thresh[class_idx]<line_sep><return>mask<block_end><def_stmt>_multiclass_batch_nms self boxes scores<block_start>"""Only boxes with same group_idx are jointly considered in nms"""<line_sep>B,n_cls=scores.shape[:2]<line_sep>scores=scores.view(-1)<line_sep>boxes=boxes.view(-1 self.DOF)<line_sep>bev_boxes=boxes[: [0 1 3 4 6]]<line_sep>batch_idx,class_idx,group_idx=self._generate_group_idx(B n_cls)<line_sep>idx=batched_nms_rotated(bev_boxes scores group_idx iou_threshold=0.01)<line_sep>boxes,batch_idx,class_idx,scores=[x[idx]<for>x (boxes batch_idx class_idx scores)]<line_sep>mask=self._above_score_thresh(scores class_idx)<line_sep>out=[x[mask]<for>x (boxes batch_idx class_idx scores)]<line_sep><return>out<block_end><def_stmt>_decode self reg_map anchors anchor_idx<block_start>"""Expands anchors in batch dimension and calls decode."""<line_sep>B,n_cls=reg_map.shape[:2]<line_sep>anchor_idx=anchor_idx[<ellipsis> <none>].expand(-1 -1 -1 self.DOF)<line_sep>deltas=reg_map.reshape(B n_cls -1 self.cfg.BOX_DOF).gather(2 anchor_idx)<line_sep>anchors=anchors.view(1 n_cls -1 self.cfg.BOX_DOF).expand(B -1 -1 -1).gather(2 anchor_idx)<line_sep>boxes=decode(deltas anchors)<line_sep><return>boxes<block_end><def_stmt>inference self feature_map anchors<block_start>""":return (boxes, batch_idx, class_idx, scores)"""<line_sep>cls_map,reg_map=self(feature_map)<line_sep>score_map=cls_map.sigmoid_()<line_sep>B,n_cls=score_map.shape[:2]<line_sep>scores,anchor_idx=score_map.view(B n_cls -1).topk(self.TOPK -1)<line_sep>boxes=self._decode(reg_map anchors anchor_idx)<line_sep>out=self._multiclass_batch_nms(boxes scores)<line_sep><return>out<block_end><def_stmt>reshape_cls self cls_map<block_start>B,_,ny,nx=cls_map.shape<line_sep>shape=(B self.cfg.NUM_CLASSES self.cfg.NUM_YAW ny nx)<line_sep>cls_map=cls_map.view(shape)<line_sep><return>cls_map<block_end><def_stmt>reshape_reg self reg_map<block_start>B,_,ny,nx=reg_map.shape<line_sep>shape=(B self.cfg.NUM_CLASSES self.cfg.BOX_DOF -1 ny nx)<line_sep>reg_map=reg_map.view(shape).permute(0 1 3 4 5 2)<line_sep><return>reg_map<block_end><def_stmt>forward self 
feature_map<block_start>cls_map=self.reshape_cls(self.conv_cls(feature_map))<line_sep>reg_map=self.reshape_reg(self.conv_reg(feature_map))<line_sep><return>cls_map reg_map<block_end><block_end><class_stmt>ProposalLoss(nn.Module)<block_start>"""
Notation: (P_i, G_i, M_i) ~ (predicted, ground truth, mask).
Loss is averaged by number of positive examples.
TODO: Replace with compiled cuda focal loss.
"""<def_stmt>__init__ self cfg<block_start>super(ProposalLoss self).__init__()<line_sep>self.cfg=cfg<block_end><def_stmt>masked_sum self loss mask<block_start>"""Mask assumed to be binary."""<line_sep>mask=mask.type_as(loss)<line_sep>loss=(loss<times>mask).sum()<line_sep><return>loss<block_end><def_stmt>reg_loss self P_reg G_reg M_reg<block_start>"""Loss applied at all positive sites."""<line_sep>P_xyz,P_wlh,P_yaw=P_reg.split([3 3 1] dim=-1)<line_sep>G_xyz,G_wlh,G_yaw=G_reg.split([3 3 1] dim=-1)<line_sep>loss_xyz=F.smooth_l1_loss(P_xyz G_xyz reduction='none')<line_sep>loss_wlh=F.smooth_l1_loss(P_wlh G_wlh reduction='none')<line_sep>loss_yaw=F.smooth_l1_loss(P_yaw G_yaw reduction='none')/math.pi<line_sep>loss=self.masked_sum(loss_xyz+loss_wlh+loss_yaw M_reg)<line_sep><return>loss<block_end><def_stmt>cls_loss self P_cls G_cls M_cls<block_start>"""Loss is applied at all non-ignore sites. Assumes logit scores."""<line_sep>loss=sigmoid_focal_loss(P_cls G_cls.float() reduction='none')<line_sep>loss=self.masked_sum(loss M_cls)<line_sep><return>loss<block_end><def_stmt>forward self item<block_start>keys=['G_cls' 'M_cls' 'P_cls' 'G_reg' 'M_reg' 'P_reg']<line_sep>G_cls,M_cls,P_cls,G_reg,M_reg,P_reg=map(item.get keys)<line_sep>normalizer=M_reg.type_as(P_reg).sum().clamp_(min=1)<line_sep>cls_loss=self.cls_loss(P_cls G_cls M_cls)/normalizer<line_sep>reg_loss=self.reg_loss(P_reg G_reg M_reg)/normalizer<line_sep>loss=cls_loss+self.cfg.TRAIN.LAMBDA<times>reg_loss<line_sep>losses=dict(cls_loss=cls_loss reg_loss=reg_loss loss=loss)<line_sep><return>losses<block_end><block_end>
|
"""
FiftyOne types.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""<line_sep># pylint: disable=wildcard-import,unused-wildcard-import
<import_from_stmt>.dataset_types *<line_sep>
|
<import_stmt>pytest<import_from_stmt>plenum.test waits<import_from_stmt>plenum.common.constants LEDGER_STATUS DOMAIN_LEDGER_ID<import_from_stmt>plenum.common.messages.node_messages MessageReq CatchupReq<import_from_stmt>plenum.server.catchup.node_leecher_service NodeLeecherService<import_from_stmt>plenum.test.delayers ppDelay pDelay cDelay DEFAULT_DELAY<import_from_stmt>plenum.test.helper sdk_send_random_and_check<import_from_stmt>plenum.test.node_request.test_timestamp.helper get_timestamp_suspicion_count<import_from_stmt>plenum.test.node_catchup.helper ensure_all_nodes_have_same_data<import_from_stmt>plenum.test.stasher delay_rules start_delaying stop_delaying_and_process<import_from_stmt>stp_core.loop.eventually eventually<def_stmt>delay_domain_ledger_catchup <block_start><def_stmt>delay msg<block_start>msg=msg[0]<if_stmt>isinstance(msg MessageReq)<and>msg.msg_type<eq>LEDGER_STATUS<and>msg.params.get('ledgerId')<eq>DOMAIN_LEDGER_ID<block_start><return>DEFAULT_DELAY<block_end><if_stmt>isinstance(msg CatchupReq)<and>msg.ledgerId<eq>DOMAIN_LEDGER_ID<block_start><return>DEFAULT_DELAY<block_end><block_end><return>delay<block_end><def_stmt>test_first_audit_catchup_during_ordering tdir tconf looper txnPoolNodeSet sdk_pool_handle sdk_wallet_client<block_start>lagging_node=txnPoolNodeSet[-1]<line_sep>other_nodes=txnPoolNodeSet[:-1]<line_sep>other_stashers=[node.nodeIbStasher<for>node other_nodes]<def_stmt>lagging_node_state <arrow>NodeLeecherService.State<block_start><return>lagging_node.ledgerManager._node_leecher._state<block_end><def_stmt>check_lagging_node_is_not_syncing_audit <block_start><assert_stmt>lagging_node_state()<ne>NodeLeecherService.State.SyncingAudit<block_end># Prevent lagging node from catching up domain ledger (and finishing catchup)
<with_stmt>delay_rules(other_stashers delay_domain_ledger_catchup())# Start catchup on lagging node
<block_start>lagging_node.start_catchup()<assert_stmt>lagging_node_state()<eq>NodeLeecherService.State.SyncingAudit<line_sep># Ensure that audit ledger is caught up by lagging node
looper.run(eventually(check_lagging_node_is_not_syncing_audit))<assert_stmt>lagging_node_state()<ne>NodeLeecherService.State.Idle<line_sep># Order request on all nodes except lagging one where they goes to stashed state
sdk_send_random_and_check(looper txnPoolNodeSet sdk_pool_handle sdk_wallet_client 1)<block_end># Now catchup should end and lagging node starts processing stashed PPs
# and resumes ordering
# ensure that all nodes will have same data after that
ensure_all_nodes_have_same_data(looper txnPoolNodeSet)<line_sep># ensure that no suspicions about obsolete PP have been raised
<assert_stmt>get_timestamp_suspicion_count(lagging_node)<eq>0<block_end>
|
"""The tests for Media player platforms."""<line_sep>
|
<import_from_stmt>builtins range<import_stmt>sys<line_sep>sys.path.insert(1 "../../../")<import_stmt>h2o<import_from_stmt>tests pyunit_utils<import_stmt>pandas<import_from_stmt>sklearn ensemble<import_from_stmt>sklearn preprocessing<import_from_stmt>sklearn.metrics roc_auc_score<import_from_stmt>h2o.estimators.gbm H2OGradientBoostingEstimator<def_stmt>ecologyGBM <block_start>ecology_train=h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/ecology_model.csv"))<line_sep>ntrees=100<line_sep>max_depth=5<line_sep>min_rows=10<line_sep>learn_rate=0.1<line_sep># Prepare data for scikit use
trainData=pandas.read_csv(pyunit_utils.locate("smalldata/gbm_test/ecology_model.csv"))<line_sep>trainData.dropna(inplace=<true>)<line_sep>le=preprocessing.LabelEncoder()<line_sep>le.fit(trainData['Method'])<line_sep>trainData['Method']=le.transform(trainData['Method'])<line_sep>trainDataResponse=trainData["Angaus"]<line_sep>trainDataFeatures=trainData[["SegSumT" "SegTSeas" "SegLowFlow" "DSDist" "DSMaxSlope" "USAvgT" "USRainDays" "USSlope" "USNative" "DSDam" "Method" "LocSed"]]<line_sep>ecology_train["Angaus"]=ecology_train["Angaus"].asfactor()<line_sep># Train H2O GBM Model:
gbm_h2o=H2OGradientBoostingEstimator(ntrees=ntrees learn_rate=learn_rate distribution="bernoulli" min_rows=min_rows max_depth=max_depth categorical_encoding='label_encoder')<line_sep>gbm_h2o.train(x=list(range(2 ecology_train.ncol)) y="Angaus" training_frame=ecology_train)<line_sep># Train scikit GBM Model:
gbm_sci=ensemble.GradientBoostingClassifier(learning_rate=learn_rate n_estimators=ntrees max_depth=max_depth min_samples_leaf=min_rows max_features=<none>)<line_sep>gbm_sci.fit(trainDataFeatures trainDataResponse)<line_sep># Evaluate the trained models on test data
# Load the test data (h2o)
ecology_test=h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/ecology_eval.csv"))<line_sep># Load the test data (scikit)
testData=pandas.read_csv(pyunit_utils.locate("smalldata/gbm_test/ecology_eval.csv"))<line_sep>testData.dropna(inplace=<true>)<line_sep>testData['Method']=le.transform(testData['Method'])<line_sep>testDataResponse=testData["Angaus"]<line_sep>testDataFeatures=testData[["SegSumT" "SegTSeas" "SegLowFlow" "DSDist" "DSMaxSlope" "USAvgT" "USRainDays" "USSlope" "USNative" "DSDam" "Method" "LocSed"]]<line_sep># Score on the test data and compare results
# scikit
auc_sci=roc_auc_score(testDataResponse gbm_sci.predict_proba(testDataFeatures)[: 1])<line_sep># h2o
gbm_perf=gbm_h2o.model_performance(ecology_test)<line_sep>auc_h2o=gbm_perf.auc()<assert_stmt>auc_h2o<ge>auc_sci "h2o (auc) performance degradation, with respect to scikit"<block_end><if_stmt>__name__<eq>"__main__"<block_start>pyunit_utils.standalone_test(ecologyGBM)<block_end><else_stmt><block_start>ecologyGBM()<block_end>
|
# -*- coding: utf-8 -*-
NOT_IMPLEMENTED_ERROR_MSG=('This method must be implemented by classes'<concat>' inheriting from BaseSerializer.')<class_stmt>BaseSerializer(object)<block_start>"""
Base Serializer class that provides an interface for other serializers.
Usage:
.. code-block:: python
from betamax import Betamax, BaseSerializer
class MySerializer(BaseSerializer):
    name = 'my'

    @staticmethod
    def generate_cassette_name(cassette_library_dir, cassette_name):
        # Generate a string that will give the relative path of a
        # cassette

    def serialize(self, cassette_data):
        # Take a dictionary and convert it to whatever

    def deserialize(self, cassette_data):
        # Uses a cassette file to return a dictionary with the
        # cassette information

Betamax.register_serializer(MySerializer)
The last line is absolutely necessary.
"""<line_sep>name=<none><line_sep>stored_as_binary=<false><line_sep>@staticmethod<def_stmt>generate_cassette_name cassette_library_dir cassette_name<block_start><raise>NotImplementedError(NOT_IMPLEMENTED_ERROR_MSG)<block_end><def_stmt>__init__ self<block_start><if_stmt><not>self.name<block_start><raise>ValueError("Serializer's name attribute must be a string"<concat>" value, not None.")<block_end>self.on_init()<block_end><def_stmt>on_init self<block_start>"""Method to implement if you wish something to happen in ``__init__``.
The return value is not checked and this is called at the end of
``__init__``. It is meant to provide the serializer author a way to
perform things during initialization of the instance that would
otherwise require them to override ``BaseSerializer.__init__``.
"""<line_sep><return><none><block_end><def_stmt>serialize self cassette_data<block_start>"""A method that must be implemented by the Serializer author.
:param dict cassette_data: A dictionary with two keys:
``http_interactions``, ``recorded_with``.
:returns: Serialized data as a string.
"""<line_sep><raise>NotImplementedError(NOT_IMPLEMENTED_ERROR_MSG)<block_end><def_stmt>deserialize self cassette_data<block_start>"""A method that must be implemented by the Serializer author.
The return value is extremely important. If it is not empty, the
dictionary returned must have the following structure::
{
'http_interactions': [{
# Interaction
},
{
# Interaction
}],
'recorded_with': 'name of recorder'
}
:param str cassette_data: The data serialized as a string which needs
to be deserialized.
:returns: dictionary
"""<line_sep><raise>NotImplementedError(NOT_IMPLEMENTED_ERROR_MSG)<block_end><block_end>
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>logging<import_stmt>threading<import_from_stmt>typing Optional Type<import_from_stmt>google.cloud.aiplatform.training_utils.cloud_profiler cloud_profiler_utils<try_stmt><block_start><import_from_stmt>werkzeug serving<block_end><except_stmt>ImportError<as>err<block_start><raise>ImportError(cloud_profiler_utils.import_error_msg)<from>err<block_end><import_from_stmt>google.cloud.aiplatform.training_utils environment_variables<import_from_stmt>google.cloud.aiplatform.training_utils.cloud_profiler webserver<import_from_stmt>google.cloud.aiplatform.training_utils.cloud_profiler.plugins base_plugin<import_from_stmt>google.cloud.aiplatform.training_utils.cloud_profiler.plugins.tensorflow tf_profiler <line_sep># Mapping of available plugins to use
_AVAILABLE_PLUGINS={"tensorflow":tf_profiler.TFProfiler}<class_stmt>MissingEnvironmentVariableException(Exception)<block_start><pass><block_end><def_stmt>_build_plugin plugin:Type[base_plugin.BasePlugin] <arrow>Optional[base_plugin.BasePlugin]<block_start>"""Builds the plugin given the object.
Args:
plugin (Type[base_plugin]):
Required. An uninitialized plugin class.
Returns:
An initialized plugin, or None if plugin cannot be
initialized.
"""<if_stmt><not>plugin.can_initialize()<block_start>logging.warning("Cannot initialize the plugin")<line_sep><return><block_end>plugin.setup()<if_stmt><not>plugin.post_setup_check()<block_start><return><block_end><return>plugin()<block_end><def_stmt>_run_app_thread server:webserver.WebServer port:int<block_start>"""Run the webserver in a separate thread.
Args:
server (webserver.WebServer):
Required. A webserver to accept requests.
port (int):
Required. The port to run the webserver on.
"""<line_sep>daemon=threading.Thread(name="profile_server" target=serving.run_simple args=("0.0.0.0" port server ) )<line_sep>daemon.setDaemon(<true>)<line_sep>daemon.start()<block_end><def_stmt>initialize plugin:str="tensorflow"<block_start>"""Initializes the profiling SDK.
Args:
plugin (str):
Required. Name of the plugin to initialize.
Current options are ["tensorflow"]
Raises:
ValueError:
The plugin does not exist.
MissingEnvironmentVariableException:
An environment variable that is needed is not set.
"""<line_sep>plugin_obj=_AVAILABLE_PLUGINS.get(plugin)<if_stmt><not>plugin_obj<block_start><raise>ValueError("Plugin {} not available, must choose from {}".format(plugin _AVAILABLE_PLUGINS.keys()))<block_end>prof_plugin=_build_plugin(plugin_obj)<if_stmt>prof_plugin<is><none><block_start><return><block_end>server=webserver.WebServer([prof_plugin])<if_stmt><not>environment_variables.http_handler_port<block_start><raise>MissingEnvironmentVariableException("'AIP_HTTP_HANDLER_PORT' must be set.")<block_end>port=int(environment_variables.http_handler_port)<line_sep>_run_app_thread(server port)<block_end>
|
# ---------------------------------------------------------#
# astroNN.models.misc_models: Contain Misc. Models
# ---------------------------------------------------------#
<import_stmt>tensorflow.keras<as>tfk<import_from_stmt>astroNN.models.base_bayesian_cnn BayesianCNNBase<import_from_stmt>astroNN.models.base_cnn CNNBase<import_from_stmt>astroNN.nn.layers MCDropout PolyFit<import_from_stmt>astroNN.nn.losses bayesian_binary_crossentropy_wrapper bayesian_binary_crossentropy_var_wrapper<import_from_stmt>astroNN.nn.losses bayesian_categorical_crossentropy_wrapper bayesian_categorical_crossentropy_var_wrapper<line_sep>regularizers=tfk.regularizers<line_sep>Dense=tfk.layers.Dense<line_sep>Input=tfk.layers.Input<line_sep>Conv2D=tfk.layers.Conv2D<line_sep>Dropout=tfk.layers.Dropout<line_sep>Flatten=tfk.layers.Flatten<line_sep>Activation=tfk.layers.Activation<line_sep>concatenate=tfk.layers.concatenate<line_sep>MaxPooling2D=tfk.layers.MaxPooling2D<line_sep>Model=tfk.models.Model<line_sep>MaxNorm=tfk.constraints.MaxNorm<class_stmt>Cifar10CNN(CNNBase)<block_start>"""
NAME:
Cifar10CNN
PURPOSE:
To create a Convolutional Neural Network model for Cifar10 for the purpose of a demo
HISTORY:
2018-Jan-11 - Written - <NAME> (University of Toronto)
"""<def_stmt>__init__ self lr=0.005<block_start>"""
NAME:
model
PURPOSE:
To create Convolutional Neural Network model
INPUT:
OUTPUT:
HISTORY:
2018-Jan-11 - Written - <NAME> (University of Toronto)
"""<line_sep>super().__init__()<line_sep>self._implementation_version='1.0'<line_sep>self.initializer='he_normal'<line_sep>self.activation='relu'<line_sep>self.num_filters=[8 16]<line_sep>self.filter_len=(3 3)<line_sep>self.pool_length=(4 4)<line_sep>self.num_hidden=[256 128]<line_sep>self.max_epochs=30<line_sep>self.lr=lr<line_sep>self.reduce_lr_epsilon=0.00005<line_sep>self.reduce_lr_min=1e-8<line_sep>self.reduce_lr_patience=1<line_sep>self.l2=1e-4<line_sep>self.dropout_rate=0.1<line_sep>self.task='classification'<line_sep>self.targetname=['airplane' 'automobile' 'bird' 'cat' 'deer' 'dog' 'frog' 'horse' 'ship' 'truck']<line_sep>self.input_norm_mode=255<line_sep>self.labels_norm_mode=0<block_end><def_stmt>model self<block_start>input_tensor=Input(shape=self._input_shape['input'] name='input')<line_sep>cnn_layer_1=Conv2D(kernel_initializer=self.initializer padding="same" filters=self.num_filters[0] kernel_size=self.filter_len kernel_regularizer=regularizers.l2(self.l2))(input_tensor)<line_sep>activation_1=Activation(activation=self.activation)(cnn_layer_1)<line_sep>cnn_layer_2=Conv2D(kernel_initializer=self.initializer padding="same" filters=self.num_filters[1] kernel_size=self.filter_len kernel_regularizer=regularizers.l2(self.l2))(activation_1)<line_sep>activation_2=Activation(activation=self.activation)(cnn_layer_2)<line_sep>maxpool_1=MaxPooling2D(pool_size=self.pool_length)(activation_2)<line_sep>flattener=Flatten()(maxpool_1)<line_sep>dropout_1=Dropout(self.dropout_rate)(flattener)<line_sep>layer_3=Dense(units=self.num_hidden[0] kernel_regularizer=regularizers.l2(self.l2) kernel_initializer=self.initializer)(dropout_1)<line_sep>activation_3=Activation(activation=self.activation)(layer_3)<line_sep>dropout_2=Dropout(self.dropout_rate)(activation_3)<line_sep>layer_4=Dense(units=self.num_hidden[1] kernel_regularizer=regularizers.l2(self.l2) kernel_initializer=self.initializer kernel_constraint=MaxNorm(2))(dropout_2)<line_sep>activation_4=Activation(activation=self.activation)(layer_4)<line_sep>layer_5=Dense(units=self._labels_shape['output'])(activation_4)<line_sep>output=Activation(activation=self._last_layer_activation name='output')(layer_5)<line_sep>model=Model(inputs=input_tensor outputs=output)<line_sep><return>model<block_end><block_end># noinspection PyCallingNonCallable
<class_stmt>MNIST_BCNN(BayesianCNNBase)<block_start>"""
NAME:
MNIST_BCNN
PURPOSE:
To create a Convolutional Neural Network model for MNIST for the purpose of a demo
HISTORY:
2018-Jan-11 - Written - <NAME> (University of Toronto)
"""<def_stmt>__init__ self lr=0.005<block_start>"""
NAME:
model
PURPOSE:
To create Convolutional Neural Network model
INPUT:
OUTPUT:
HISTORY:
2018-Jan-11 - Written - <NAME> (University of Toronto)
"""<line_sep>super().__init__()<line_sep>self._implementation_version='1.0'<line_sep>self.initializer='he_normal'<line_sep>self.activation='relu'<line_sep>self.num_filters=[8 16]<line_sep>self.filter_len=(3 3)<line_sep>self.pool_length=(4 4)<line_sep>self.num_hidden=[256 128]<line_sep>self.max_epochs=30<line_sep>self.lr=lr<line_sep>self.reduce_lr_epsilon=0.00005<line_sep>self.reduce_lr_min=1e-8<line_sep>self.reduce_lr_patience=1<line_sep>self.l2=1e-4<line_sep>self.dropout_rate=0.1<line_sep>self.task='classification'<line_sep>self.targetname=['Zero' 'One' 'Two' 'Three' 'Four' 'Five' 'Six' 'Seven' 'Eight' 'Nine']<line_sep>self.input_norm_mode=255<line_sep>self.labels_norm_mode=0<block_end><def_stmt>model self<block_start>input_tensor=Input(shape=self._input_shape['input'] name='input')<line_sep>cnn_layer_1=Conv2D(kernel_initializer=self.initializer padding="same" filters=self.num_filters[0] kernel_size=self.filter_len kernel_regularizer=regularizers.l2(self.l2))(input_tensor)<line_sep>activation_1=Activation(activation=self.activation)(cnn_layer_1)<line_sep>dropout_1=MCDropout(self.dropout_rate disable=self.disable_dropout)(activation_1)<line_sep>cnn_layer_2=Conv2D(kernel_initializer=self.initializer padding="same" filters=self.num_filters[1] kernel_size=self.filter_len kernel_regularizer=regularizers.l2(self.l2))(dropout_1)<line_sep>activation_2=Activation(activation=self.activation)(cnn_layer_2)<line_sep>dropout_2=MCDropout(self.dropout_rate disable=self.disable_dropout)(activation_2)<line_sep>maxpool_1=MaxPooling2D(pool_size=self.pool_length)(dropout_2)<line_sep>flattener=Flatten()(maxpool_1)<line_sep>layer_3=Dense(units=self.num_hidden[0] kernel_regularizer=regularizers.l2(self.l2) kernel_initializer=self.initializer)(flattener)<line_sep>activation_3=Activation(activation=self.activation)(layer_3)<line_sep>dropout_4=MCDropout(self.dropout_rate disable=self.disable_dropout)(activation_3)<line_sep>layer_4=Dense(units=self.num_hidden[1] kernel_regularizer=regularizers.l2(self.l2) kernel_initializer=self.initializer kernel_constraint=MaxNorm(2))(dropout_4)<line_sep>activation_4=Activation(activation=self.activation)(layer_4)<line_sep>output=Dense(units=self._labels_shape['output'] activation='linear' name='output')(activation_4)<line_sep>output_activated=Activation(self._last_layer_activation)(output)<line_sep>variance_output=Dense(units=self._labels_shape['output'] activation='softplus' name='variance_output')(activation_4)<line_sep>model=Model(inputs=[input_tensor] outputs=[output variance_output])<line_sep># new astroNN high performance dropout variational inference on GPU expects single output
model_prediction=Model(inputs=[input_tensor] outputs=concatenate([output_activated variance_output]))<if_stmt>self.task<eq>'classification'<block_start>output_loss=bayesian_categorical_crossentropy_wrapper(variance_output)<line_sep>variance_loss=bayesian_categorical_crossentropy_var_wrapper(output)<block_end><elif_stmt>self.task<eq>'binary_classification'<block_start>output_loss=bayesian_binary_crossentropy_wrapper(variance_output)<line_sep>variance_loss=bayesian_binary_crossentropy_var_wrapper(output)<block_end><else_stmt><block_start><raise>RuntimeError('Only "regression", "classification" and "binary_classification" are supported')<block_end><return>model model_prediction output_loss variance_loss<block_end><block_end># noinspection PyCallingNonCallable
<class_stmt>SimplePolyNN(CNNBase)<block_start>"""
Class for Neural Network for Gaia Polynomial fitting
:History: 2018-Jul-23 - Written - <NAME> (University of Toronto)
"""<def_stmt>__init__ self lr=0.005 init_w=<none> use_xbias=<false><block_start>super().__init__()<line_sep>self._implementation_version='1.0'<line_sep>self.max_epochs=40<line_sep>self.lr=lr<line_sep>self.reduce_lr_epsilon=0.00005<line_sep>self.num_hidden=3# equals degree of polynomial to fit
self.reduce_lr_min=1e-8<line_sep>self.reduce_lr_patience=2<line_sep>self.input_norm_mode=0<line_sep>self.labels_norm_mode=0<line_sep>self.init_w=init_w<line_sep>self.use_xbias=use_xbias<line_sep>self.task='regression'<line_sep>self.targetname=['unbiased_parallax']<block_end><def_stmt>model self<block_start>input_tensor=Input(shape=self._input_shape name='input')<line_sep>flattener=Flatten()(input_tensor)<line_sep>output=PolyFit(deg=self.num_hidden output_units=self._labels_shape use_xbias=self.use_xbias name='output' init_w=self.init_w kernel_regularizer=regularizers.l2(self.l2))(flattener)<line_sep>model=Model(inputs=input_tensor outputs=output)<line_sep><return>model<block_end><block_end>
|
<def_stmt>new_seating_chart size=22<block_start>"""Create a new seating chart.
:param size: int - number of seats in the seating chart.
:return: dict - with number of seats specified, and placeholder values.
"""<line_sep><return>{number:<none><for>number range(1 size+1)}<block_end><def_stmt>arrange_reservations guests=<none><block_start>"""Assign guests to seats.
:param guests: list - list of guest names for reservations.
:return: dict - Default sized dictionary with guests assigned seats,
and placeholders for empty seats.
"""<line_sep>seats=new_seating_chart()<if_stmt>guests<block_start><for_stmt>seat_number range(1 len(guests))<block_start>seats[seat_number]=guests[seat_number]<block_end><block_end><return>seats<block_end><def_stmt>find_all_available_seats seats<block_start>"""Find and return seat numbers that are unassigned.
:param seats: dict - seating chart.
:return: list - list of seat numbers available for reserving.
"""<line_sep>available=[]<for_stmt>seat_num,value seats.items()<block_start><if_stmt>value<is><none><block_start>available.append(seat_num)<block_end><block_end><return>available<block_end><def_stmt>current_empty_seat_capacity seats<block_start>"""Find the number of seats that are currently empty.
:param seats: dict - dictionary of reserved seats.
:return: int - number of seats empty.
"""<line_sep>count=0<for_stmt>value seats.values()<block_start><if_stmt>value<is><none><block_start>count<augadd>1<block_end><block_end><return>count<block_end><def_stmt>accommodate_waiting_guests seats guests<block_start>"""Asses if guest can be accommodated. Update seating if they can be.
:param seats: dict - seating chart dictionary.
:param guests: list - walk-in guests
:return: dict - updated seating chart with available spaces filled.
"""<line_sep>curr_empty_seats=current_empty_seat_capacity(seats)<line_sep>empty_seat_list=find_all_available_seats(seats)<if_stmt>len(guests)<le>curr_empty_seats<block_start><for_stmt>index,_ enumerate(guests)<block_start>seats[empty_seat_list[index]]=guests[index]<block_end><block_end><return>seats<block_end><def_stmt>empty_seats seats seat_numbers<block_start>"""Empty listed seats of their previous reservations.
:param seats: dict - seating chart dictionary.
:param seat_numbers: list - list of seat numbers to free up or empty.
:return: updated seating chart dictionary.
"""<for_stmt>seat seat_numbers<block_start>seats[seat]=<none><block_end><return>seats<block_end>
|
# -*- coding: utf-8 -*-
<import_from_stmt>. models<import_from_stmt>. utils<line_sep># TODO: add version information here
__all__=['models' 'utils']<line_sep>
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>requests<import_from_stmt>unfurl utils<line_sep>hash_edge={'color':{'color':'#4A93AE'} 'title':'Hash Identification Functions' 'label':'#'}<line_sep>hash_lookup_edge={'color':{'color':'#4A93AE'} 'title':'Hash Lookup Functions' 'label':'#'}<def_stmt>nitrxgen_md5_lookup value<block_start>response=requests.get(f'https://www.nitrxgen.net/md5db/{value}' verify=<false>).text<if_stmt>response<block_start><return>response<block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>virustotal_lookup unfurl hash_value<block_start>response=requests.get(f'https://www.virustotal.com/api/v3/files/{hash_value}' headers={'x-apikey':unfurl.api_keys.get('virustotal')})<if_stmt>response.status_code<eq>200<block_start><try_stmt><block_start>result=response.json()<line_sep><return>result['data']['attributes']<block_end><except_stmt><block_start><return><false><block_end><block_end><block_end><def_stmt>decode_cisco_type_7 encoded_text<block_start>cisco_constant=b"dsfd;kfoA,.iyewrkldJKDHSUBsgvca69834ncxv9873254k;fg87"<try_stmt><block_start>salt=int(encoded_text[0:2])<block_end><except_stmt>ValueError# Valid salts should be ints; if not, move on.
<block_start><return><block_end><try_stmt><block_start>encoded=bytearray.fromhex(encoded_text[2:])<block_end><except_stmt>ValueError# Not valid Type 7 encoded then; exit
<block_start><return><block_end>plaintext=''<for_stmt>i range(0 len(encoded))<block_start>j=(i+salt)%53<line_sep>p=encoded[i]^cisco_constant[j]<line_sep>plaintext<augadd>chr(p)<block_end># If the result isn't readable as ASCII, call it a false positive and move on without adding a node.
<try_stmt><block_start>_=plaintext.encode('ascii' errors='strict')<block_end><except_stmt>UnicodeEncodeError<block_start><return><block_end><return>plaintext<block_end><def_stmt>run unfurl node<block_start><if_stmt>node.data_type.startswith('uuid')<block_start><return><block_end><if_stmt>node.data_type.startswith('hash')<block_start><if_stmt>node.data_type<eq>'hash.md5'<and>unfurl.remote_lookups<block_start>hash_plaintext=nitrxgen_md5_lookup(node.value)<if_stmt>hash_plaintext<block_start>unfurl.add_to_queue(data_type=f'text' key='Plaintext' value=hash_plaintext hover='Queried Nitrxgen database of MD5 hashes and found a matching plaintext value' parent_id=node.node_id incoming_edge_config=hash_lookup_edge)<block_end><block_end><if_stmt>node.data_type<in>('hash.md5' 'hash.sha-1' 'hash.sha-256')<and>unfurl.remote_lookups<block_start>vt_results=virustotal_lookup(unfurl node.value)<if_stmt>vt_results<block_start>label_text='Hash found on VirusTotal'<if_stmt>vt_results.get("type_description")<block_start>label_text<augadd>f'\nFile Type: {vt_results.get("type_description")};'<block_end><if_stmt>vt_results.get("meaningful_name")<block_start>label_text<augadd>f'\nName: {vt_results.get("meaningful_name")};'<block_end><if_stmt>vt_results.get("reputation")<block_start>label_text<augadd>f'\nReputation: {vt_results.get("reputation")};'<block_end>unfurl.add_to_queue(data_type=f'text' key='Hash found on VirusTotal' value=<none> label=label_text hover='Queried VirusTotal with the hash value and found a match.' parent_id=node.node_id incoming_edge_config=hash_lookup_edge)<block_end><block_end><block_end><else_stmt><block_start><if_stmt><not>isinstance(node.value str)<block_start><return><block_end># Filter for values that are only hex chars (A-F,0-9) and contains both a letter and number.
# This could conceivably filter out valid hashes, but will filter out many more invalid values.
<if_stmt><not>(utils.hex_re.fullmatch(node.value)<and>utils.digits_re.search(node.value)<and>utils.letters_re.search(node.value))<block_start><return><block_end># Cisco "Type 7" password encoding is very flexible, so detecting it is very false positive prone
# as it isn't a fixed length. However, decoding it is easy, so Unfurl will only "detect" something as
# using this encoding type if it can also decode it (as a method of verifying it).
# Ref: https://passlib.readthedocs.io/en/stable/lib/passlib.hash.cisco_type7.html
cisco_type_7_m=utils.cisco_7_re.fullmatch(node.value)<if_stmt>cisco_type_7_m<block_start>cisco_type_7_plaintext=decode_cisco_type_7(node.value)<if_stmt>cisco_type_7_plaintext<block_start>unfurl.add_to_queue(data_type=f'text' key=f'Cisco "Type 7" encoding' value=cisco_type_7_plaintext label=f'Cisco "Type 7" encoding; plaintext is "{cisco_type_7_plaintext}"' hover='Cisco "Type 7" password encoding is based<br> on XOR and is easily reversible '<concat>'[<a href="https://passlib.readthedocs.io/en/stable/lib/passlib.hash.cisco_type7.html">'<concat>'ref</a>].' parent_id=node.node_id incoming_edge_config=hash_edge)<block_end><return><block_end><if_stmt>len(node.value)<eq>32<and>node.value[12]<eq>'4'# UUIDv4 is very common and it's the same length as an MD5 hash. This might filter out some legitimate
# MD5 hashes, but it will filter out many more UUIDs. I think the tradeoff is worth it for Unfurl.
<block_start><return><block_end>hash_name,hash_hover,new_node_value=<none> <none> <none><if_stmt>len(node.value)<eq>32<block_start>hash_name='MD5'<line_sep>hash_hover=f'This is potentially a <b>{hash_name}</b> hash <br>(based on length and character set).'<block_end><if_stmt>len(node.value)<eq>40<block_start>hash_name='SHA-1'<line_sep>hash_hover=f'This is potentially a <b>{hash_name}</b> hash <br>(based on length and character set).'<block_end><if_stmt>len(node.value)<eq>64<block_start>hash_name='SHA-256'<line_sep>hash_hover=f'This is potentially a <b>{hash_name}</b> hash <br>(based on length and character set).'<block_end><if_stmt>len(node.value)<eq>128<block_start>hash_name='SHA-512'<line_sep>hash_hover=f'This is potentially a <b>{hash_name}</b> hash <br>(based on length and character set).'<block_end><if_stmt>hash_name<in>('MD5' 'SHA-1' 'SHA-256')# Pass through the values of three common file hashes for further analysis; don't send on the
# other types to avoid duplicate processing.
<block_start>new_node_value=node.value<block_end><if_stmt>hash_name<block_start>unfurl.add_to_queue(data_type=f'hash.{hash_name.lower()}' key=f'{hash_name} Hash' value=new_node_value label=f'Potential {hash_name} hash' hover=hash_hover parent_id=node.node_id incoming_edge_config=hash_edge)<block_end><block_end><block_end>
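# Illustrative sketch (not part of unfurl): "Type 7" encoding is a plain XOR against a
# fixed key stream, so an encoder using the same constant round-trips with
# decode_cisco_type_7() above. The encode_cisco_type_7 helper and the sample salt and
# plaintext are made up for this example.
def encode_cisco_type_7(plaintext, salt=9):
    cisco_constant = b"dsfd;kfoA,.iyewrkldJKDHSUBsgvca69834ncxv9873254k;fg87"
    encoded = bytes(ord(ch) ^ cisco_constant[(i + salt) % 53] for i, ch in enumerate(plaintext))
    return '{:02d}{}'.format(salt, encoded.hex())


if __name__ == '__main__':
    assert decode_cisco_type_7(encode_cisco_type_7('secret')) == 'secret'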
|
<import_stmt>datetime<import_stmt>unittest<import_stmt>omniture<class_stmt>UtilsTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>fakelist=[{"id":"123" "title":"abc"} {"id":"456" "title":"abc"}]<line_sep>self.alist=omniture.Value.list("segemnts" fakelist {})<block_end><def_stmt>tearDown self<block_start><del_stmt>self.alist<block_end><def_stmt>test_addressable_list_repr_html_ self<block_start>"""Test the _repr_html_ for AddressableList this is used in ipython """<line_sep>outlist='<table><tr><td><b>ID</b></td><td><b>Title</b></td></tr><tr><td><b>123</b></td><td>abc</td></tr><tr><td><b>456</b></td><td>abc</td></tr></table>'<line_sep>self.assertEqual(self.alist._repr_html_() outlist "The _repr_html_ isn't working: {}".format(self.alist._repr_html_()))<block_end><def_stmt>test_addressable_list_str_ self<block_start>"""Test _str_ method """<line_sep>outstring='ID 123 | Name: abc \nID 456 | Name: abc \n'<line_sep>self.assertEqual(self.alist.__str__() outstring "The __str__ isn't working: {}".format(self.alist.__str__()))<block_end><def_stmt>test_addressable_list_get_time self<block_start>""" Test the custom get item raises a problem when there are duplicate names """<with_stmt>self.assertRaises(KeyError)<block_start>self.alist['abc']<block_end><block_end><def_stmt>test_wrap self<block_start>"""Test the wrap method """<line_sep>self.assertIsInstance(omniture.utils.wrap("test") list)<line_sep>self.assertIsInstance(omniture.utils.wrap(["test"]) list)<line_sep>self.assertEqual(omniture.utils.wrap("test") ["test"])<line_sep>self.assertEqual(omniture.utils.wrap(["test"]) ["test"])<block_end><def_stmt>test_date self<block_start>"""Test the Date Method"""<line_sep>test_date="2016-09-01"<line_sep>self.assertEqual(omniture.utils.date(<none>) <none>)<line_sep>self.assertEqual(omniture.utils.date(test_date).strftime("%Y-%m-%d") test_date)<line_sep>d=datetime.date(2016 9 1)<line_sep>self.assertEqual(omniture.utils.date(d).strftime("%Y-%m-%d") test_date)<line_sep>t=datetime.datetime(2016 9 1)<line_sep>self.assertEqual(omniture.utils.date(t).strftime("%Y-%m-%d") test_date)<line_sep>self.assertEqual(omniture.utils.date(u"2016-09-01").strftime("%Y-%m-%d") test_date)<with_stmt>self.assertRaises(ValueError)<block_start>omniture.utils.date({})<block_end><block_end><def_stmt>test_affix self<block_start>"""Test the Affix method to make sure it handles things correctly"""<line_sep>p="pre"<line_sep>s="suf"<line_sep>v="val"<line_sep>con="+"<line_sep>self.assertEqual(omniture.utils.affix(p v connector=con) con.join([p v]))<line_sep>self.assertEqual(omniture.utils.affix(base=v suffix=s connector=con) con.join([v s]))<line_sep>self.assertEqual(omniture.utils.affix(p v s connector=con) con.join([p v s]))<line_sep>self.assertEqual(omniture.utils.affix(base=v connector=con) con.join([v]))<block_end><def_stmt>test_translate self<block_start>"""Test the translate method """<line_sep>t={"product":"cat_collar" "price":100 "location":"no where"}<line_sep>m={"product":"Product_Name" "price":"Cost" "date":"Date"}<line_sep>s={"Product_Name":"cat_collar" "Cost":100 "location":"no where"}<line_sep>self.assertEqual(omniture.utils.translate(t m) s)<block_end><block_end>
|
<import_stmt>pytest<import_from_stmt>jsonschema validate<import_from_stmt>jsonschema.exceptions ValidationError<import_from_stmt>pyhttptest.http_schemas.base_schema base_schema<def_stmt>test_schema_with_valid_data <block_start>data={'name':'Test' 'verb':'GET' 'endpoint':'users' 'host':'http://test.com' }<line_sep>result=validate(instance=data schema=base_schema)<assert_stmt>result<is><none><block_end><def_stmt>test_schema_with_invalid_data <block_start><with_stmt>pytest.raises(ValidationError)<as>exc# Not including a required property 'endpoint'
# from the schema into the ``dict`` below
<block_start>data={'name':'Test' 'verb':'GET' 'host':'http://test.com' }<line_sep>validate(instance=data schema=base_schema)<block_end><assert_stmt>'required property'<in>str(exc.value)<block_end>
|
<import_from_stmt>.trash.tasks permanently_delete_marked_trash mark_old_trash_for_permanent_deletion setup_period_trash_tasks <line_sep>__all__=["permanently_delete_marked_trash" "mark_old_trash_for_permanent_deletion" "setup_period_trash_tasks" ]<line_sep>
|
<def_stmt>projekt # Single line comment
<block_start>print('RepoReapers')<block_end>
|
# -*- coding: utf-8 -*-
# @Author : DevinYang(<EMAIL>)
<import_from_stmt>.utils *<import_from_stmt>.lmdb_dataset *<import_from_stmt>.datasets *<import_from_stmt>.dataprefetcher DataPreFetcher<import_from_stmt>.dynamic_data_provider *<import_from_stmt>.sampler *<line_sep>
|
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>pybullet<as>p<import_from_stmt>.robot Robot<class_stmt>PR2(Robot)<block_start><def_stmt>__init__ self controllable_joints='right'<block_start>right_arm_joint_indices=[42 43 44 46 47 49 50]# Controllable arm joints
left_arm_joint_indices=[64 65 66 68 69 71 72]# Controllable arm joints
wheel_joint_indices=[3 4 5 6 7 8 9 10 11 12 13 14]# Controllable wheel joints
right_end_effector=54# Used to get the pose of the end effector
left_end_effector=76# Used to get the pose of the end effector
right_gripper_indices=[57 58 59 60]# Gripper actuated joints
left_gripper_indices=[79 80 81 82]# Gripper actuated joints
right_tool_joint=54# Joint that tools are attached to
left_tool_joint=76# Joint that tools are attached to
right_gripper_collision_indices=list(range(49 64))# Used to disable collision between gripper and tools
left_gripper_collision_indices=list(range(71 86))# Used to disable collision between gripper and tools
gripper_pos={'scratch_itch':[0.25]<times>4 # Gripper open position for holding tools
'feeding':[0.03]<times>4 'drinking':[0.45]<times>4 'bed_bathing':[0.2]<times>4 'dressing':[0]<times>4 'arm_manipulation':[0.15]<times>4}<line_sep>tool_pos_offset={'scratch_itch':[0 0 0] # Position offset between tool and robot tool joint
'feeding':[0 -0.03 -0.11] 'drinking':[-0.01 0 -0.05] 'bed_bathing':[0 0 0] 'arm_manipulation':[0.125 0 -0.075]}<line_sep>tool_orient_offset={'scratch_itch':[0 0 0] # RPY orientation offset between tool and robot tool joint
'feeding':[-0.2 0 0] 'drinking':[np.pi/2.0 0 0] 'bed_bathing':[0 0 0] 'arm_manipulation':[np.pi/2.0 0 0]}<line_sep>toc_base_pos_offset={'scratch_itch':[0.1 0 0] # Robot base offset before TOC base pose optimization
'feeding':[0.1 0.2 0] 'drinking':[0.2 0.2 0] 'bed_bathing':[-0.1 0 0] 'dressing':[1.7 0.7 0] 'arm_manipulation':[-0.3 0.7 0]}<line_sep>toc_ee_orient_rpy={'scratch_itch':[0 0 0] # Initial end effector orientation
'feeding':[np.pi/2.0 0 0] 'drinking':[0 0 0] 'bed_bathing':[0 0 0] 'dressing':[[0 0 np.pi] [0 0 np.pi<times>3/2.0]] 'arm_manipulation':[0 0 0]}<line_sep>wheelchair_mounted=<false><line_sep>super(PR2 self).__init__(controllable_joints right_arm_joint_indices left_arm_joint_indices wheel_joint_indices right_end_effector left_end_effector right_gripper_indices left_gripper_indices gripper_pos right_tool_joint left_tool_joint tool_pos_offset tool_orient_offset right_gripper_collision_indices left_gripper_collision_indices toc_base_pos_offset toc_ee_orient_rpy wheelchair_mounted half_range=<false>)<block_end><def_stmt>init self directory id np_random fixed_base=<true><block_start>self.body=p.loadURDF(os.path.join(directory 'PR2' 'pr2_no_torso_lift_tall.urdf') useFixedBase=fixed_base basePosition=[-1 -1 0] flags=p.URDF_USE_INERTIA_FROM_FILE physicsClientId=id)<line_sep>super(PR2 self).init(self.body id np_random)<line_sep># Recolor robot
<for_stmt>i [19 42 64]<block_start>p.changeVisualShape(self.body i rgbaColor=[1.0 1.0 1.0 1.0] physicsClientId=id)<block_end><for_stmt>i [43 46 49 58 60 65 68 71 80 82]<block_start>p.changeVisualShape(self.body i rgbaColor=[0.4 0.4 0.4 1.0] physicsClientId=id)<block_end><for_stmt>i [45 51 67 73]<block_start>p.changeVisualShape(self.body i rgbaColor=[0.7 0.7 0.7 1.0] physicsClientId=id)<block_end>p.changeVisualShape(self.body 20 rgbaColor=[0.8 0.8 0.8 1.0] physicsClientId=id)<line_sep>p.changeVisualShape(self.body 40 rgbaColor=[0.6 0.6 0.6 1.0] physicsClientId=id)<block_end><def_stmt>reset_joints self<block_start>super(PR2 self).reset_joints()<line_sep># Position end effectors with dual arm robots
self.set_joint_angles(self.right_arm_joint_indices [-1.75 1.25 -1.5 -0.5 -1 0 -1])<line_sep>self.set_joint_angles(self.left_arm_joint_indices [1.75 1.25 1.5 -0.5 1 0 1])<block_end><block_end>
|
<import_from_stmt>director_default *<line_sep>f=Foo()<line_sep>f=Foo(1)<line_sep>f=Bar()<line_sep>f=Bar(1)<line_sep>
|
<import_stmt>gzip<import_stmt>io<import_stmt>tarfile<import_stmt>zipfile<import_from_stmt>pathlib Path<import_from_stmt>typing List<import_stmt>pytest<import_from_stmt>gobbli.util TokenizeMethod blob_to_dir detokenize dir_to_blob extract_archive is_archive shuffle_together tokenize <def_stmt>make_zip tmpdir:Path relative_paths:List[Path]<arrow>Path<block_start>"""
Make a zip archive from a list of relative paths.
Create empty files at each path and add them to the archive.
"""<line_sep>zip_path=tmpdir/"test.zip"<with_stmt>zipfile.ZipFile(zip_path "w")<as>z<block_start><for_stmt>relative_path relative_paths<block_start>full_path=tmpdir/relative_path<line_sep>full_path.parent.mkdir(exist_ok=<true> parents=<true>)<line_sep>full_path.touch()<line_sep>z.write(full_path arcname=relative_path)<block_end><block_end><return>zip_path<block_end><def_stmt>make_tar_gz tmpdir:Path relative_paths:List[Path]<arrow>Path<block_start>"""
Make a .tar.gz archive from a list of relative paths.
Create empty files at each path and add them to the archive.
"""<line_sep>tar_path=tmpdir/"test.tar.gz"<with_stmt>tarfile.open(tar_path "w:gz")<as>z<block_start><for_stmt>relative_path relative_paths<block_start>full_path=tmpdir/relative_path<line_sep>full_path.parent.mkdir(exist_ok=<true> parents=<true>)<line_sep>full_path.touch()<line_sep>z.add(str(full_path) arcname=str(relative_path) recursive=<false>)<block_end><block_end><return>tar_path<block_end><def_stmt>make_gz tmpdir:Path name:str<arrow>Path<block_start>"""
Create a gzip-compressed file with the given name under the given temp directory.
Return the path to the compressed file.
"""<line_sep>gzip_path=tmpdir/f"{name}.gz"<with_stmt>gzip.open(gzip_path "wb")<as>z<block_start>z.write(b"Test")<block_end><return>gzip_path<block_end>TEST_ARCHIVE_DATA=["./a" "./b/c"]<line_sep>@pytest.mark.parametrize("archive_func,junk,expected_paths" [(make_zip <false> [Path("a") Path("b")/"c"]) (make_zip <true> [Path("a") Path("c")]) (make_tar_gz <false> [Path("a") Path("b")/"c"]) (make_tar_gz <true> [Path("a") Path("c")]) ] )<def_stmt>test_extract_archive tmpdir archive_func junk expected_paths<block_start>tmpdir_path=Path(tmpdir)<line_sep>archive_path=archive_func(tmpdir_path TEST_ARCHIVE_DATA)<line_sep>archive_extract_dir=tmpdir_path/"extract"<line_sep>extract_archive(archive_path archive_extract_dir junk_paths=junk)<for_stmt>relative_path expected_paths<block_start><assert_stmt>(archive_extract_dir/relative_path).exists()<block_end><block_end><def_stmt>test_extract_gz tmpdir<block_start>tmpdir_path=Path(tmpdir)<line_sep>filename="test.txt"<line_sep>archive_path=make_gz(tmpdir_path "test.txt")<line_sep>archive_extract_dir=tmpdir_path/"extract"<line_sep>extract_archive(archive_path archive_extract_dir)<assert_stmt>(archive_extract_dir/filename).exists()<block_end>@pytest.mark.parametrize("name,expected_is_archive" [("test.tar.gz" <true>) ("test.gz" <true>) ("test.txt.gz" <true>) ("test.zip" <true>) ("test.xz" <false>) ("test.txt" <false>) ("test.vec" <false>) ("test.bin" <false>) ] )<def_stmt>test_is_archive name expected_is_archive<block_start><assert_stmt>is_archive(Path(name))<eq>expected_is_archive<block_end><def_stmt>test_dir_to_blob tmpdir<block_start>test_dir=Path(tmpdir)/"test"<line_sep>test_dir.mkdir()<line_sep>test_file_name="test.txt"<line_sep>test_file=test_dir/test_file_name<line_sep>file_contents="test"<line_sep>test_file.write_text(file_contents)<line_sep>blob=dir_to_blob(test_dir)<line_sep>fileobj=io.BytesIO(blob)<line_sep>fileobj.seek(0)<line_sep>extract_path=test_dir/"test2"<with_stmt>tarfile.open(fileobj=fileobj mode="r:gz")<as>archive<block_start>archive.extractall(extract_path)<block_end>extracted_file=extract_path/test_file_name<assert_stmt>extracted_file.exists()<assert_stmt>extracted_file.read_text()<eq>file_contents<block_end><def_stmt>test_blob_to_dir tmpdir<block_start>test_dir=Path(tmpdir)/"test"<line_sep>test_dir.mkdir()<line_sep>test_file_name="test.txt"<line_sep>test_file=test_dir/test_file_name<line_sep>file_contents="test"<line_sep>test_file.write_text(file_contents)<line_sep>blob=dir_to_blob(test_dir)<line_sep>extract_path=test_dir/"test2"<line_sep>blob_to_dir(blob extract_path)<line_sep>extracted_file=extract_path/test_file_name<assert_stmt>extracted_file.exists()<assert_stmt>extracted_file.read_text()<eq>file_contents<block_end>@pytest.mark.parametrize("l1,l2,err" [([] [] <none>) (["a"] [1] <none>) (["a" "b"] [1] ValueError) (["a" "b"] [1 2] <none>) (["a" "b" "c"] [1 2 3] <none>) (["a" "b" "c" "d"] [1 2 3 4] <none>) ] )<def_stmt>test_shuffle_together l1 l2 err<block_start>seed=1<if_stmt>err<is><not><none><block_start><with_stmt>pytest.raises(err)<block_start>shuffle_together(l1 l2 seed=seed)<block_end><block_end><else_stmt><block_start>original_rows=set(zip(l1 l2))<line_sep>shuffle_together(l1 l2 seed=seed)<for_stmt>row zip(l1 l2)<block_start><assert_stmt>tuple(row)<in>original_rows<block_end><block_end><block_end>@pytest.mark.parametrize("text,tokens" [("This is a test." 
["this" "is" "a" "test."]) ("Two spaces" ["two" "spaces"]) ("Hyphenated-word" ["hyphenated-word"]) ("Numbers 1 and 2" ["numbers" "1" "and" "2"]) ] )<def_stmt>test_tokenize_split text tokens# Whitespace tokenization just splits on whitespace
<block_start><assert_stmt>tokenize(TokenizeMethod.SPLIT [text])<eq>[tokens]<block_end>@pytest.mark.parametrize("text,tokens" [("This is a test." ["this" "is" "a" "test"]) ("Two spaces" ["two" "spaces"]) ("Hyphenated-word" ["hyphenated" "word"]) ("Numbers 1 and 2" ["numbers" "and"]) ] )<def_stmt>test_tokenize_spacy text tokens# Spacy tokenization lowercases and removes non-alphabetic tokens
<block_start><assert_stmt>tokenize(TokenizeMethod.SPACY [text])<eq>[tokens]<block_end>@pytest.mark.parametrize("tokenize_method" [TokenizeMethod.SPLIT TokenizeMethod.SPACY])@pytest.mark.parametrize("tokens,text" [(["this" "is" "a" "test"] "this is a test") (["hyphenated-word"] "hyphenated-word") (["try" "," "punctuation" "."] "try , punctuation .") ] )<def_stmt>test_detokenize_split_spacy text tokens tokenize_method<block_start><assert_stmt>detokenize(tokenize_method [tokens])<eq>[text]<block_end>@pytest.mark.parametrize("model_path" [Path("spm") <none>])<def_stmt>test_tokenize_detokenize_sentencepiece tmpdir model_path<block_start>texts=["a b c" "a ab c" "a b ac"]<line_sep># Model should be trained
<if_stmt>model_path<is><not><none><block_start>model_path=Path(tmpdir)/model_path<block_end>tokens=tokenize(TokenizeMethod.SENTENCEPIECE texts model_path=model_path vocab_size=7)<line_sep># Control sequence indicating whitespace
_="▁"<line_sep>expected_tokens=[[_ "a" _ "b" _ "c"] [_ "a" _ "a" "b" _ "c"] [_ "a" _ "b" _ "a" "c"] ]<assert_stmt>tokens<eq>expected_tokens<line_sep># Can't detokenize if we didn't give a persistent model path to the tokenize
# function
<if_stmt>model_path<is><not><none><block_start><assert_stmt>detokenize(TokenizeMethod.SENTENCEPIECE tokens model_path)<eq>texts<line_sep># Previously trained model should be reused with the old vocab size, and a new model
# shouldn't be trained
tokens=tokenize(TokenizeMethod.SENTENCEPIECE texts model_path=model_path)<assert_stmt>tokens<eq>expected_tokens<block_end><block_end>
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
<import_stmt>logging<import_stmt>sys<import_from_stmt>logging handlers<class_stmt>Singleton(type)<block_start>_instances={}<def_stmt>__call__ cls *args **kwargs<block_start><if_stmt>cls<not><in>cls._instances<block_start>cls._instances[cls]=super(Singleton cls).__call__(*args **kwargs)<block_end><return>cls._instances[cls]<block_end><block_end><class_stmt>Logger(metaclass=Singleton)# Predefined log level includes, from highest to lowest severity:
# CRITICAL, ERROR, WARNING, INFO, DEBUG
<block_start><def_stmt>__init__ self filename=<none> level='INFO' when='D' backCount=3 fmt='[%(asctime)s] %(message)s'<block_start><assert_stmt>filename<is><not><none><line_sep>self.filename=filename<line_sep>self.logger=logging.getLogger(filename)<line_sep>format_str=logging.Formatter(fmt)<line_sep>self.logger.setLevel(logging.getLevelName(level))<line_sep>sh=logging.StreamHandler(sys.stdout)<line_sep>sh.setFormatter(format_str)<line_sep>th=handlers.TimedRotatingFileHandler(filename=filename when=when backupCount=backCount encoding='utf-8')<line_sep>th.setFormatter(format_str)<line_sep>self.logger.addHandler(sh)<line_sep>self.logger.addHandler(th)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>log=Logger('all.log' level='ERROR')<line_sep>log.logger.debug('debug')<line_sep>log.logger.info('info')<line_sep>log.logger.warning('warning')<line_sep>log.logger.error('error')<line_sep>log.logger.critical('critical')<block_end>
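# Demonstration of the Singleton metaclass above: constructing Logger a second time
# returns the first instance and the new arguments are ignored (its __init__ is not
# run again, so no handlers are created for 'other.log').
if __name__ == '__main__':
    another = Logger('other.log', level='DEBUG')
    assert another is log
    assert another.filename == 'all.log'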
|
<import_stmt>torch<import_from_stmt>torch.nn functional<as>F<import_from_stmt>up.utils.general.registry_factory MASK_PREDICTOR_REGISTRY<import_from_stmt>up.utils.general.fp16_helper to_float32<import_from_stmt>up.tasks.det.plugins.condinst.models.head.condinst_head aligned_bilinear<line_sep>@MASK_PREDICTOR_REGISTRY.register('condinst')<class_stmt>MaskPredictorCondinst(object)<block_start><def_stmt>__init__ self <block_start><pass><block_end>@torch.no_grad()@to_float32<def_stmt>predict self mask_head input locations controller mask_gen_params<block_start>mask_feats=input['mask_feats']<line_sep>image_info=input['image_info']<line_sep>image=input['image']<line_sep>bboxes=input['dt_bboxes']<line_sep>mask_head_params,fpn_levels,instance_locations,im_inds,pred_boxes=self.get_pred_instances(input controller mask_gen_params)<line_sep>mask_logits=mask_head.mask_heads_forward_with_coords(mask_feats locations mask_head_params fpn_levels instance_locations im_inds)<line_sep>pred_global_masks=mask_logits.sigmoid()<line_sep>dt_bboxes=[]<line_sep>dt_masks=[]<for_stmt>im_id,(image_size ) enumerate(zip(image_info))<block_start>ind_per_im=torch.nonzero(im_inds<eq>im_id)[: 0]<line_sep>pred_masks,ind_per_im_keep=self.postprocess(image ind_per_im image_size pred_boxes pred_global_masks)<line_sep>dt_bboxes.append(bboxes[ind_per_im_keep])<for_stmt>idx range(len(ind_per_im_keep))<block_start>dt_masks.append(pred_masks[idx].detach().cpu().numpy())<block_end><block_end>dt_bboxes=torch.cat(dt_bboxes dim=0)<line_sep><return>{'dt_masks':dt_masks 'dt_bboxes':dt_bboxes}<block_end><def_stmt>get_pred_instances self input controller mask_gen_params<block_start>B=controller[0].shape[0]<line_sep>K=sum([x.shape[1]<for>x controller])<line_sep>bboxes=input['dt_bboxes']<line_sep>pos_inds=input['pos_inds']<line_sep>im_inds,cls_rois,scores,cls=torch.split(bboxes [1 4 1 1] dim=1)<line_sep>im_inds=im_inds.squeeze().type(torch.LongTensor).to(pos_inds.device)<line_sep>pos_inds=pos_inds.squeeze().add(im_inds<times>K).type(torch.LongTensor)<line_sep>mask_head_params=torch.cat(controller dim=1).reshape(-1 mask_gen_params)[pos_inds]<line_sep>mlvl_locations=input['mlvl_locations']<line_sep>instance_locations=torch.cat(mlvl_locations).repeat(B 1)[pos_inds]<line_sep>fpn_levels=torch.cat([mlvl_locations[lvl_num].new_ones(len(mlvl_locations[lvl_num]) dtype=torch.long)<times>lvl_num<for>lvl_num range(len(mlvl_locations))])<line_sep>fpn_levels=fpn_levels.repeat(B)[pos_inds].type(torch.LongTensor)<line_sep><return>mask_head_params fpn_levels instance_locations im_inds cls_rois<block_end><def_stmt>postprocess self image ind_per_im image_size pred_boxes pred_global_masks=<none> mask_threshold=0.5<block_start>padded_im_h,padded_im_w=(image.shape[-2] image.shape[-1])<line_sep>resized_im_h,resized_im_w=(image_size[0] image_size[1])<line_sep>output_height,output_width=(image_size[3] image_size[4])<line_sep>scale_x,scale_y=(output_width/resized_im_w output_height/resized_im_h)<line_sep>output_boxes=pred_boxes[ind_per_im]<line_sep>output_boxes[: 0::2]<augmul>scale_x<line_sep>output_boxes[: 1::2]<augmul>scale_y<line_sep>output_boxes[: 0]=torch.clamp(output_boxes[: 0] min=0 max=output_width)<line_sep>output_boxes[: 1]=torch.clamp(output_boxes[: 1] min=0 max=output_height)<line_sep>output_boxes[: 2]=torch.clamp(output_boxes[: 2] min=0 max=output_width)<line_sep>output_boxes[: 3]=torch.clamp(output_boxes[: 3] min=0 max=output_height)<line_sep>keep_inds=((output_boxes[: 2]-output_boxes[: 0])<g>0.0)&((output_boxes[: 3]-output_boxes[: 
1])<g>0.0)<line_sep>ind_per_im=ind_per_im[keep_inds]<if_stmt>pred_global_masks<is><not><none><block_start>pred_global_masks=pred_global_masks[ind_per_im]<line_sep>mask_h,mask_w=pred_global_masks.size()[-2:]<line_sep>factor_h=padded_im_h<floordiv>mask_h<line_sep>factor_w=padded_im_w<floordiv>mask_w<assert_stmt>factor_h<eq>factor_w<line_sep>factor=factor_h<line_sep>pred_global_masks=aligned_bilinear(pred_global_masks factor)<line_sep>pred_global_masks=pred_global_masks[: : :resized_im_h :resized_im_w]<line_sep>pred_global_masks=F.interpolate(pred_global_masks size=(output_height output_width) mode="bilinear" align_corners=<false>)<line_sep>pred_global_masks=pred_global_masks[: 0 : :]<line_sep>pred_masks=(pred_global_masks<g>mask_threshold).float()<block_end><return>pred_masks ind_per_im<block_end><block_end><def_stmt>build_mask_predictor predictor_cfg<block_start><return>MASK_PREDICTOR_REGISTRY.build(predictor_cfg)<block_end>
|
<import_stmt>re<import_stmt>sys<try_stmt><block_start><import_from_stmt>. lib_util<import_from_stmt>. log<block_end><except_stmt>ModuleNotFoundError<block_start><import_stmt>lib_util<import_stmt>log<block_end><class_stmt>Target(object)<block_start>MainMenu=['離開,再見…' '人, 我是' '[呼叫器]' ]<line_sep>MainMenu_Exiting=['【主功能表】' '您確定要離開' ]<line_sep>QueryPost=['請按任意鍵繼續' '───────┘' ]<line_sep>InBoard=['看板資訊/設定' '文章選讀' '相關主題']<line_sep>InBoardWithCursor=['【' '看板資訊/設定' ]<line_sep># (h)說明 (←/q)離開
# (y)回應(X%)推文(h)說明(←)離開
# (y)回應(X/%)推文 (←)離開
InPost=['瀏覽' '頁' ')離開']<line_sep>PostEnd=['瀏覽' '頁 (100%)' ')離開']<line_sep>InWaterBallList=['瀏覽' '頁' '說明' ]<line_sep>WaterBallListEnd=['瀏覽' '頁 (100%)' '說明']<line_sep>PostIP_New=['※ 發信站: 批踢踢實業坊(ptt.cc), 來自:']<line_sep>PostIP_Old=['◆ From:']<line_sep>Edit=['※ 編輯']<line_sep>PostURL=['※ 文章網址']<line_sep>Vote_Type1=['◆ 投票名稱' '◆ 投票中止於' '◆ 票選題目描述']<line_sep>Vote_Type2=['投票名稱' '◆ 預知投票紀事' ]<line_sep>AnyKey='任意鍵'<line_sep>InTalk=['【聊天說話】' '線上使用者列表' '查詢網友' '顯示上幾次熱訊']<line_sep>InUserList=['休閒聊天' '聊天/寫信' '說明' ]<line_sep>InMailBox=['【郵件選單】' '鴻雁往返']<line_sep>InMailMenu=['【電子郵件】' '我的信箱' '把所有私人資料打包回去' '寄信給帳號站長' ]<line_sep>PostNoContent=['◆ 此文章無內容' AnyKey]<line_sep>InBoardList=['【看板列表】' '選擇看板' '只列最愛' '已讀/未讀']<line_sep>UseTooManyResources=['程式耗用過多計算資源']<line_sep>Animation=['★ 這份文件是可播放的文字動畫,要開始播放嗎?']<line_sep>CursorToGoodbye=MainMenu.copy()<block_end><def_stmt>show config screen_queue function_name=<none><block_start><if_stmt>config.log_level<ne>log.level.TRACE<block_start><return><block_end><if_stmt>isinstance(screen_queue list)<block_start><for_stmt>Screen screen_queue<block_start>print('-'<times>50)<try_stmt><block_start>print(Screen.encode(sys.stdin.encoding "replace").decode(sys.stdin.encoding))<block_end><except_stmt>Exception<block_start>print(Screen.encode('utf-8' "replace").decode('utf-8'))<block_end><block_end><block_end><else_stmt><block_start>print('-'<times>50)<try_stmt><block_start>print(screen_queue.encode(sys.stdin.encoding "replace").decode(sys.stdin.encoding))<block_end><except_stmt>Exception<block_start>print(screen_queue.encode('utf-8' "replace").decode('utf-8'))<block_end>print('len:'+str(len(screen_queue)))<block_end><if_stmt>function_name<is><not><none><block_start>print('錯誤在 '+function_name+' 函式發生')<block_end>print('-'<times>50)<block_end>displayed=<false><def_stmt>vt100 ori_screen:str no_color:bool=<true><arrow>str<block_start>result=ori_screen<if_stmt>no_color<block_start>result=re.sub('\x1B\[[\d+;]*m' '' result)<block_end>result=re.sub(r'[\x1B]' '=PTT=' result)<line_sep># global displayed
# if not displayed:
# display = ('★' in result)
# if display:
# displayed = True
# else:
# display = False
#
# if display:
# print('=1=' * 10)
# print(result)
# print('=2=' * 10)
# result = '\n'.join(
# [x.rstrip() for x in result.split('\n')]
# )
# When editing a post there can be an unexplained screen-clearing issue; needs further attention
# if result.endswith('=PTT=[H'):
# print('!!!!!!!!=PTT=[H=PTT=[H=PTT=!!!!!!!!!!!!!!!')
<while_stmt>'=PTT=[H'<in>result<block_start><if_stmt>result.count('=PTT=[H')<eq>1<and>result.endswith('=PTT=[H')<block_start><break><block_end>result=result[result.find('=PTT=[H')+len('=PTT=[H'):]<block_end><while_stmt>'=PTT=[2J'<in>result<block_start>result=result[result.find('=PTT=[2J')+len('=PTT=[2J'):]<block_end>pattern_result=re.compile('=PTT=\[(\d+);(\d+)H$').search(result)<line_sep>last_position=<none><if_stmt>pattern_result<is><not><none># print(f'Before [{pattern_result.group(0)}]')
<block_start>last_position=pattern_result.group(0)<block_end># When entering PTT, the category board list is sometimes sent along and the main menu screen is then drawn directly on top of it
# Without a preceding [H or [2J, the line counts used for the later drawing become wrong
<if_stmt>'=PTT=[1;3H主功能表'<in>result<block_start>result=result[result.find('=PTT=[1;3H主功能表')+len('=PTT=[1;3H主功能表'):]<block_end># if '=PTT=[1;' in result:
# if last_position is None:
# result = result[result.rfind('=PTT=[1;'):]
# elif not last_position.startswith('=PTT=[1;'):
# result = result[result.rfind('=PTT=[1;'):]
# print('-'*50)
# print(result)
result_list=re.findall('=PTT=\[(\d+);(\d+)H' result)<for_stmt>(line_count space_count) result_list<block_start>line_count=int(line_count)<line_sep>space_count=int(space_count)<line_sep>current_line=result[:result.find(f'[{line_count};{space_count}H')].count('\n')+1<line_sep># if display:
# print(f'>{line_count}={space_count}<')
# print(f'>{current_line}<')
<if_stmt>current_line<g>line_count# if LastPosition is None:
# pass
# elif LastPosition != f'=PTT=[{line_count};{space_count}H':
# print(f'current_line [{current_line}]')
# print(f'line_count [{line_count}]')
# print('Clear !!!')
# print(f'!!!!!!!!=PTT=[{line_count};{space_count}H')
<block_start>result_lines=result.split('\n')<line_sep>target_line=result_lines[line_count-1]<if_stmt>f'=PTT=[{line_count};{space_count}H=PTT=[K'<in>result# If there is a [K, erase everything on that line after the coordinate
<block_start>target_line=target_line[:space_count-1]<line_sep># OriginIndex = -1
origin_line=<none><line_sep># for i, line in enumerate(result_lines):
<for_stmt>line result_lines<block_start><if_stmt>f'=PTT=[{line_count};{space_count}H=PTT=[K'<in>line# OriginIndex = i
<block_start>origin_line=line<line_sep><break><block_end><block_end><if_stmt>origin_line.count('=PTT=')<g>2<block_start>origin_line=origin_line[:lib_util.findnth(origin_line '=PTT=' 3)]<block_end># result_lines[OriginIndex] = result_lines[OriginIndex].replace(
# origin_line,
# ''
# )
origin_line=origin_line[len(f'=PTT=[{line_count};{space_count}H=PTT=[K'):]<line_sep># log.showValue(
# log.level.INFO,
# 'origin_line',
# origin_line
# )
new_target_line=f'{target_line}{origin_line}'<line_sep>result_lines[line_count-1]=new_target_line<block_end>result='\n'.join(result_lines)<block_end><elif_stmt>current_line<eq>line_count# print(f'!!!!!=PTT=[{line_count};{space_count}H')
<block_start>current_space=result[:result.find(f'=PTT=[{line_count};{space_count}H')]<line_sep>current_space=current_space[current_space.rfind('\n')+1:]<line_sep># if display:
# print(f'>>{current_space}<<')
# print(f'ori length>>{len(current_space)}<<')
# newversion_length = len(current_space.encode('big5uao', 'ignore'))
# print(f'newversion_length >>{newversion_length}<<')
# current_space = len(current_space.encode('big5', 'replace'))
current_space=len(current_space)<line_sep># if display:
# print(f'!!!!!{current_space}')
<if_stmt>current_space<g>space_count# if display:
# print('1')
<block_start>result=result.replace(f'=PTT=[{line_count};{space_count}H' (line_count-current_line)<times>'\n'+space_count<times>' ')<block_end><else_stmt># if display:
# print('2')
<block_start>result=result.replace(f'=PTT=[{line_count};{space_count}H' (line_count-current_line)<times>'\n'+(space_count-current_space)<times>' ')<block_end><block_end><else_stmt><block_start>result=result.replace(f'=PTT=[{line_count};{space_count}H' (line_count-current_line)<times>'\n'+space_count<times>' ')<block_end># while '=PTT=[K' in result:
# Target = result[result.find('=PTT=[K'):]
# print(f'Target[{Target}]')
# index1 = Target.find('\n')
# index2 = Target.find('=PTT=')
# if index2 == 0:
# index = index1
# else:
# index = min(index1, index2)
# break
# Target = Target[:index]
# print('===' * 20)
# print(result)
# print('-=-' * 20)
# print(Target)
# print('===' * 20)
# result = result.replace(Target, '')
# print(Target)
# print('===' * 20)
<block_end><if_stmt>last_position<is><not><none><block_start>result=result.replace(last_position '')<block_end># if display:
# print('-Final-' * 10)
# print(result)
# print('-Final-' * 10)
<return>result<block_end>
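# Illustrative sketch: with no_color=True (the default) vt100() strips ANSI colour
# sequences, so a simple coloured fragment reduces to its plain text. The sample
# string is made up for this example.
if __name__ == '__main__':
    sample = '\x1B[1;33mHello PTT\x1B[m'
    assert vt100(sample) == 'Hello PTT'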
|
#!python
## Copyright (c) 2021 <NAME>
## Licensed under: MIT License
<import_from_stmt>markdown markdown<import_from_stmt>os.path join<import_stmt>os sys shutil re<line_sep>## TODO: This is a quick and dirty script to generate html
## from markdown. Refactor this file in the future.
## Usage:
## to generate pages : python generate.py
## to clean pages : python generate.py (-c, --clean)
TEMPLATE_PATH='static/template.html'<line_sep>ROOT_URL='https://thakeenathees.github.io/pocketlang/'<line_sep>## Home page should be in the SOURCE_DIR.
HOME_PAGE='home.md'<line_sep>TRY_PAGE='try-it-now.html'<line_sep>SOURCE_DIR='pages/'<line_sep>TARGET_DIR='build/'<line_sep>STATIC_DIR='static/'<line_sep>## Additional source files of wasm try online page.
WASM_SOURCE_FILES='''\
<script type="text/javascript" src="{{ STATIC_DIR }}codejar/codejar.js"></script>
<script type="text/javascript" src="{{ STATIC_DIR }}codejar/linenumbers.js"></script>
<link rel="stylesheet" type="text/css" href="{{ STATIC_DIR }}codejar/style.css" />
<script type="text/javascript" src="{{ STATIC_DIR }}prism/prism.js"></script>
<link rel="stylesheet" type="text/css" href="{{ STATIC_DIR }}prism/prism.css" />
<script type="text/javascript" src="{{ STATIC_DIR }}try_now.js"></script>
'''<line_sep>## Navigation pages in order. Should match the path names.
## Any file/folder name shouldn't contain white space.
PAGES=[('Getting-Started' [TRY_PAGE 'learn-in-15-minutes.md' 'build-from-source.md' 'contributing.md' ]) ('Language-API' ['variables.md' 'functions.md' 'fibers.md' 'modules.md' ]) ]<def_stmt>new_context <block_start><return>{'{{ TITLE }}':'' '{{ NAVIGATION }}':'' '{{ CONTENT }}':'' '{{ HOME_URL }}':'' '{{ STATIC_DIR }}':'' }<block_end><def_stmt>main ## Remove generated files and create empty target dir with static files.
<block_start><if_stmt>os.path.exists(TARGET_DIR)<block_start>remove_ignore=('.git' )<for_stmt>_dir os.listdir(TARGET_DIR)<block_start><if_stmt>_dir<in>remove_ignore<block_start><continue><block_end><if_stmt>os.path.isdir(join(TARGET_DIR _dir))<block_start>shutil.rmtree(join(TARGET_DIR _dir))<block_end><else_stmt><block_start>os.remove(join(TARGET_DIR _dir))<block_end><block_end><block_end>shutil.copytree(STATIC_DIR join(TARGET_DIR STATIC_DIR))<line_sep>open(join(TARGET_DIR '.nojekyll') 'w').close()<line_sep>## Initialize the template and navigation.
template=''<line_sep>navigation=generate_navigation()<with_stmt>open(TEMPLATE_PATH 'r')<as>f<block_start>template=f.read()<block_end>## Generate the home page.
index_html=join(TARGET_DIR 'index.html')<line_sep>ctx=generate_page_context(join(SOURCE_DIR HOME_PAGE) index_html navigation)<line_sep>write_page(ctx template index_html)<for_stmt>entry PAGES## entry = ('dirname', [files...])
<block_start>_dir=entry[0]<for_stmt>file entry[1]<block_start>ext=get_validated_ext(file)<line_sep>path=join(SOURCE_DIR _dir file)<line_sep>dst=''<line_sep>path_prefix=_dir.lower().replace(' ' '-')+'-'<if_stmt>ext<eq>'.md'<block_start>dst=join(TARGET_DIR path_prefix+file.replace('.md' '.html'))<block_end><else_stmt><block_start>dst=join(TARGET_DIR path_prefix+file)<block_end>ctx=generate_page_context(path dst navigation)<line_sep>_template=template<if_stmt>file<eq>TRY_PAGE<block_start>_template=template.replace('{{ WASM_SOURCE_FILES }}' WASM_SOURCE_FILES)<block_end>write_page(ctx _template dst)<block_end><block_end><pass><block_end><def_stmt>generate_navigation <block_start>navigation=''<for_stmt>entry PAGES<block_start>_dir=entry[0]<line_sep>title=_dir.replace('-' ' ').title()<line_sep>navigation<augadd>'<div class="navigation">\n'<line_sep>navigation<augadd>'<h3><strong>%s</strong></h3>\n'%(title)<line_sep>navigation<augadd>'<ul class="menu">\n'<for_stmt>file entry[1]<block_start>ext=get_validated_ext(file)<line_sep>link=''## Assuming that file name don't contain '.md' at the middle.
path_prefix=_dir.lower().replace(' ' '-')+'-'<if_stmt>ext<eq>'.md'<block_start>link=join(ROOT_URL path_prefix+file.replace('.md' '.html'))<block_end><else_stmt><block_start>link=join(ROOT_URL path_prefix+file)<block_end>link=link.replace('\\' '/')<line_sep>title=file.replace(ext '').replace('-' ' ').title()<line_sep>navigation<augadd>'<li><a href="%s">%s</a></li>\n'%(link title)<block_end>navigation<augadd>'</ul>\n'<line_sep>navigation<augadd>'</div>\n'<block_end><return>navigation<block_end><def_stmt>generate_page_context src dst navigation<block_start>title=path_to_title(src)<line_sep>content=path_to_content(src)<line_sep>ctx=new_context()<line_sep>ctx['{{ TITLE }}']=title<line_sep>ctx['{{ NAVIGATION }}']=navigation<line_sep>ctx['{{ CONTENT }}']=content<line_sep>ctx['{{ HOME_URL }}']=ROOT_URL+'index.html'<line_sep>ctx['{{ STATIC_DIR }}']=STATIC_DIR<line_sep><return>ctx<line_sep><block_end><def_stmt>get_validated_ext path<block_start>ext=''<if_stmt>path.endswith('.md')<block_start>ext='.md'<block_end><elif_stmt>path.endswith('.html')<block_start>ext='.html'<block_end><else_stmt><block_start><raise>Exception('Expected .md / .html file.')<block_end><return>ext<block_end>## Get the title from the src path.
<def_stmt>path_to_title path<block_start>ext=get_validated_ext(path)<line_sep>title=os.path.basename(path).replace(ext '').title()<line_sep>title<augadd>' - PocketLang'<line_sep><return>title<block_end>## Generate html content from the markdown source path.
## If the path is an .html file, return its content.
<def_stmt>path_to_content src<block_start>text=''<with_stmt>open(src 'r')<as>f<block_start>text=f.read()<block_end>## If html file we're done.
<if_stmt>get_validated_ext(src)<eq>'.html'<block_start><return>text<block_end><assert_stmt>(src.endswith('.md'))<line_sep>text=custom_md_override(text)<line_sep>content=markdown(text extensions=['codehilite' 'fenced_code'])<line_sep>## A hacky way to inject html overrides to highlight our language
## I'm not focusing on generating the pages and this is a hacky way to
## do so. This should be done with a good static page generator instead
## of this script.
<return>custom_html_override(src content)<block_end>## Inject our custom markdown text override.
<def_stmt>custom_md_override text## Add html anchor.
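## e.g. the markdown line '## %%Getting Started%%' becomes '## Getting Started <a href="#getting-started" name="getting-started" class="anchor">#</a>'.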
<block_start><for_stmt>pre ('#' '##' '###')<block_start>pattern='(^'+pre+r' \s*%%(.*)%%\n)'<for_stmt>match,title re.findall(pattern text flags=re.MULTILINE)<block_start>link=title.strip().lower().replace(' ' '-')<line_sep>text=text.replace(match f'{pre} {title} <a href="#{link}" name="{link}" class="anchor">#</a>')<block_end><block_end><return>text<block_end>## Inject our custom html overrides.
<def_stmt>custom_html_override src content## FIXME: I should create a pygment lexer.
## A dirty way to inject our keyword (to ruby's).
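## Snippets are highlighted with the ruby lexer, so promote pocketlang-only words from name ('n') spans to keyword ('k') spans and demote ruby-only keywords back to plain names.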
<block_start>addnl_keywords=['null' 'from' 'import' 'as' 'func' 'native' 'continue']<line_sep>not_keyword=['alias' 'begin' 'case' 'next' 'nil' 'redo' 'rescue' 'retry' 'ensure' 'undef' 'unless' 'super' 'until' 'when' 'defined' ]<for_stmt>kw addnl_keywords<block_start>content=content.replace('<span class="n">%s</span>'%kw '<span class="k">%s</span>'%kw)<block_end><for_stmt>nk not_keyword<block_start>content=content.replace('<span class="k">%s</span>'%nk '<span class="n">%s</span>'%nk)<block_end>## codehilite mark the compilation command as error.
content=content.replace('<span class="err">' '<span>')<line_sep><return>content<block_end><def_stmt>write_page ctx template dst<block_start>_dir=os.path.dirname(dst)<if_stmt>_dir<not><in>('.' './' '')<and><not>os.path.exists(_dir)<block_start>os.makedirs(os.path.dirname(dst))<block_end>page=template<for_stmt>key,value ctx.items()<block_start>page=page.replace(key value)<block_end>page=page.replace('{{ WASM_SOURCE_FILES }}' '')<with_stmt>open(dst 'w')<as>f<block_start>f.write(page)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>_local=<false><if_stmt>len(sys.argv)<ge>2<block_start><if_stmt>sys.argv[1]<eq>'local'<block_start>_local=<true><line_sep>#ROOT_URL = 'http://localhost:8000/'
<block_end><block_end>ROOT_URL=''## No more nested directory pages.
main()<line_sep>## Write a batch file to start the server in windows.
<if_stmt>_local<and>os.name<eq>'nt'<block_start><with_stmt>open(join(TARGET_DIR 'server.bat') 'w')<as>f<block_start>f.write('python -m http.server 8000')<block_end><block_end>print('Static pages generated'+('for localhost:8000.'<if>_local<else>'.'))<block_end>
|
<import_stmt>unittest<import_from_stmt>neuroptica.layers Activation ClementsLayer<import_from_stmt>neuroptica.losses CategoricalCrossEntropy MeanSquaredError<import_from_stmt>neuroptica.models Sequential<import_from_stmt>neuroptica.nonlinearities *<import_from_stmt>neuroptica.optimizers Optimizer<import_from_stmt>tests.base NeuropticaTest<import_from_stmt>tests.test_models TestModels<class_stmt>TestLosses(NeuropticaTest)<block_start>'''Tests for model losses'''<def_stmt>test_loss_gradients self<block_start>N=7<line_sep>losses=[MeanSquaredError CategoricalCrossEntropy]<for_stmt>loss losses<block_start>print("Testing loss {}".format(loss))<line_sep>batch_size=6<line_sep>n_samples=batch_size<times>4<line_sep># Generate random points and label them (one-hot) according to index of max element
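# e.g. a sample whose largest coordinate is at index 2 gets a one-hot label with a 1 in position 2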
X_all=(2<times>np.random.rand(N<times>n_samples)-1).reshape((N n_samples))# random N-D points
X_max=np.argmax(X_all axis=0)<line_sep>Y_all=np.zeros((N n_samples))<line_sep>Y_all[X_max np.arange(n_samples)]=1.0<line_sep># Make a single-layer model
model=Sequential([ClementsLayer(N) Activation(AbsSquared(N))])<for_stmt>X,Y Optimizer.make_batches(X_all Y_all batch_size)# Propagate the data forward
<block_start>Y_hat=model.forward_pass(X)<line_sep>d_loss=loss.dL(Y_hat Y)<line_sep># Compute the backpropagated signals for the model
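# verify_model_gradients presumably checks these against numerical finite-difference gradients, using the epsilon below as the step size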
gradients=model.backward_pass(d_loss)<line_sep>TestModels.verify_model_gradients(model X Y loss.L gradients epsilon=1e-6)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
|
<def_stmt>func foo=print<block_start>"""test"""<block_end>
|
<import_from_stmt>rest_framework serializers<import_from_stmt>attendance.models SSIDName<class_stmt>SSIDNameSerializer(serializers.ModelSerializer)<block_start><class_stmt>Meta<block_start>model=SSIDName<line_sep>fields=['name']<line_sep>read_only_fields=['name']<block_end><block_end>
|
<import_from_stmt>django.core.exceptions ValidationError<import_from_stmt>django.contrib.auth get_user_model<import_from_stmt>django.test TestCase<import_from_stmt>.models RecipeIngredient Recipe<line_sep>User=get_user_model()<class_stmt>UserTestCase(TestCase)<block_start><def_stmt>setUp self<block_start>self.user_a=User.objects.create_user('cfe' password='<PASSWORD>')<block_end><def_stmt>test_user_pw self<block_start>checked=self.user_a.check_password("<PASSWORD>")<line_sep>self.assertTrue(checked)<block_end><block_end><class_stmt>RecipeTestCase(TestCase)<block_start><def_stmt>setUp self<block_start>self.user_a=User.objects.create_user('cfe' password='<PASSWORD>')<line_sep>self.recipe_a=Recipe.objects.create(name='Grilled Chicken' user=self.user_a)<line_sep>self.recipe_b=Recipe.objects.create(name='Grilled Chicken Tacos' user=self.user_a)<line_sep>self.recipe_ingredient_a=RecipeIngredient.objects.create(recipe=self.recipe_a name='Chicken' quantity='1/2' unit='pound')<line_sep>self.recipe_ingredient_b=RecipeIngredient.objects.create(recipe=self.recipe_a name='Chicken' quantity='asdfasd' unit='pound')<block_end><def_stmt>test_user_count self<block_start>qs=User.objects.all()<line_sep>self.assertEqual(qs.count() 1)<block_end><def_stmt>test_user_recipe_reverse_count self<block_start>user=self.user_a<line_sep>qs=user.recipe_set.all()<line_sep>self.assertEqual(qs.count() 2)<block_end><def_stmt>test_user_recipe_forward_count self<block_start>user=self.user_a<line_sep>qs=Recipe.objects.filter(user=user)<line_sep>self.assertEqual(qs.count() 2)<block_end><def_stmt>test_recipe_ingredient_reverse_count self<block_start>recipe=self.recipe_a<line_sep>qs=recipe.recipeingredient_set.all()<line_sep>self.assertEqual(qs.count() 2)<block_end><def_stmt>test_recipe_ingredientcount self<block_start>recipe=self.recipe_a<line_sep>qs=RecipeIngredient.objects.filter(recipe=recipe)<line_sep>self.assertEqual(qs.count() 2)<block_end><def_stmt>test_user_two_level_relation self<block_start>user=self.user_a<line_sep>qs=RecipeIngredient.objects.filter(recipe__user=user)<line_sep>self.assertEqual(qs.count() 2)<block_end><def_stmt>test_user_two_level_relation_reverse self<block_start>user=self.user_a<line_sep>recipeingredient_ids=list(user.recipe_set.all().values_list('recipeingredient__id' flat=<true>))<line_sep>qs=RecipeIngredient.objects.filter(id__in=recipeingredient_ids)<line_sep>self.assertEqual(qs.count() 2)<block_end><def_stmt>test_user_two_level_relation_via_recipes self<block_start>user=self.user_a<line_sep>ids=user.recipe_set.all().values_list("id" flat=<true>)<line_sep>qs=RecipeIngredient.objects.filter(recipe__id__in=ids)<line_sep>self.assertEqual(qs.count() 2)<block_end><def_stmt>test_unit_measure_validation self<block_start>invalid_unit='ounce'<line_sep>ingredient=RecipeIngredient(name='New' quantity=10 recipe=self.recipe_a unit=invalid_unit)<line_sep>ingredient.full_clean()<block_end><def_stmt>test_unit_measure_validation_error self<block_start>invalid_units=['nada' 'asdfadsf']<with_stmt>self.assertRaises(ValidationError)<block_start><for_stmt>unit invalid_units<block_start>ingredient=RecipeIngredient(name='New' quantity=10 recipe=self.recipe_a unit=unit)<line_sep>ingredient.full_clean()<block_end><block_end><block_end><def_stmt>test_quantity_as_float self<block_start>self.assertIsNotNone(self.recipe_ingredient_a.quantity_as_float)<line_sep>self.assertIsNone(self.recipe_ingredient_b.quantity_as_float)<block_end><block_end>
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>datetime<import_stmt>os<import_stmt>sys<import_stmt>time<import_stmt>uuid<import_from_stmt>google.cloud bigquery<import_stmt>bq_utils<line_sep>GCLOUD_PROJECT_ENV='GCLOUD_PROJECT'<line_sep>DATETIME_FORMAT='%Y%m%d'<line_sep>DATASET_NAME='python_clientlibs_download_by_week'<line_sep>VENEER_TABLE_NAME='veneer_client_libs'<line_sep>STACKDRIVER_TABLE_NAME='stackdriver_client_libs'<line_sep>GRPC_TABLE_NAME='grpc_lib'<line_sep>THIRD_PARTY_TABLE_NAME='third_party_client_libs'<line_sep>TABLES=[VENEER_TABLE_NAME GRPC_TABLE_NAME STACKDRIVER_TABLE_NAME THIRD_PARTY_TABLE_NAME ]<line_sep>CLIENTLIBS={VENEER_TABLE_NAME:['google-cloud-core' 'google-cloud-speech' 'google-cloud-language' 'google-cloud-pubsub' 'google-cloud-bigquery' 'google-cloud-bigtable' 'google-cloud-datastore' 'google-cloud-spanner' 'google-cloud-storage' 'google-cloud-vision' 'google-cloud-translate' 'google-cloud-dns' 'google-cloud-videointelligence' ] STACKDRIVER_TABLE_NAME:['google-cloud-logging' 'google-cloud-monitoring' 'google-cloud-error_reporting' 'google-cloud-trace' ] GRPC_TABLE_NAME:['grpcio' ] THIRD_PARTY_TABLE_NAME:['pandas-gbq' ]}<def_stmt>get_weekly_clientlibs_downloads clientlibs_table_name date_str<block_start>"""Use a SQL query to collect the weekly download data of the client
libraries.
Args:
clientlibs_table_name (str): Table name, which is the key in the
CLIENTLIBS dict.
date_str (str): A date string in "YYYYMMDD" format.
Returns:
list: rows of the query result.
"""<line_sep>client_libs=CLIENTLIBS[clientlibs_table_name]<line_sep>date_time=datetime.datetime.strptime(date_str DATETIME_FORMAT)<line_sep>week_dates=[(date_time+datetime.timedelta(days=-i)).strftime(DATETIME_FORMAT)<for>i range(7)]<line_sep>query="""
SELECT
file.project as client_library_name,
COUNT(*) as download_count
FROM
`the-psf.pypi.downloads*`
WHERE
file.project IN UNNEST(@client_libs)
AND
_TABLE_SUFFIX IN UNNEST(@week_dates)
GROUP BY client_library_name
"""<line_sep>client=bigquery.Client()<line_sep>query_parameters=[bigquery.ArrayQueryParameter('client_libs' 'STRING' client_libs) bigquery.ArrayQueryParameter('week_dates' 'STRING' week_dates)]<line_sep>job_config=bigquery.QueryJobConfig()<line_sep>job_config.query_parameters=query_parameters<line_sep>query_job=client.query(query job_config=job_config)<line_sep># Wait for the job to complete and get the results
results=[row.values()<for>row query_job.result()]<line_sep>rows=[(date_time )+row<for>row results]<line_sep><return>rows<block_end><def_stmt>main <block_start><for_stmt>table_name CLIENTLIBS.keys()<block_start>rows=get_weekly_clientlibs_downloads(clientlibs_table_name=table_name date_str=datetime.datetime.now().strftime("%Y%m%d"))<line_sep>bq_utils.insert_rows(project=os.environ.get(GCLOUD_PROJECT_ENV) dataset_name=DATASET_NAME table_name=table_name rows=rows)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
|
<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_from_stmt>keras.models Sequential<import_from_stmt>keras.models load_model<import_from_stmt>keras.models model_from_json<import_from_stmt>keras.layers.core Dense Activation<import_from_stmt>keras.utils np_utils<line_sep>#----------------------------
train=<false><line_sep>load_all_model=<true>#if train is False
#----------------------------
#preparing data for Exclusive OR (XOR)
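# XOR truth table: 0^0=0, 0^1=1, 1^0=1, 1^1=0; the one-hot labels below encode is_0/is_1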
attributes=[#x1, x2
[0 0] [0 1] [1 0] [1 1]]<line_sep>labels=[#is_0, is_1 -> only one column can be 1 in the labels variable
[1 0] [0 1] [0 1] [1 0]]<line_sep>#transforming attributes and labels matrices to numpy arrays
data=np.array(attributes 'int64')<line_sep>target=np.array(labels 'int64')<line_sep>#----------------------------
#creating model
<if_stmt>train<eq><true><block_start>model=Sequential()<line_sep>model.add(Dense(3#num of hidden units
input_shape=(len(attributes[0]) )))<line_sep>#num of features in input layer
model.add(Activation('sigmoid'))#activation function from input layer to 1st hidden layer
model.add(Dense(len(labels[0])))#num of classes in output layer
model.add(Activation('softmax'))#activation function from 1st hidden layer to output layer
model_config=model.to_json()<line_sep>open("model_structure.json" "w").write(model_config)<line_sep>#compile
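#categorical_crossentropy pairs with the softmax output layer and the one-hot targets defined above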
model.compile(loss='categorical_crossentropy' optimizer='adam')<line_sep>#training
model.fit(data target epochs=2000 verbose=0)<line_sep>model.save("model.hdf5")<line_sep>model.save_weights('model_weights.h5')<block_end><else_stmt><block_start><if_stmt>load_all_model<eq><true><block_start>model=load_model("model.hdf5")#model structure, weights
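#load_model restores architecture, weights and compile state from the single HDF5 file saved above; the else-branch below instead rebuilds from JSON and loads weights separately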
print("network structure and weights loaded")<block_end><else_stmt><block_start>model=model_from_json(open("model_structure.json" "r").read())#load structure
print("network structure loaded")<line_sep>model.compile(loss='categorical_crossentropy' optimizer='adam')<line_sep>model.load_weights('model_weights.h5')#load weights
print("weights loaded")<block_end><block_end>score=model.evaluate(data target)<line_sep>print(score)<line_sep>
|
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_stmt>six<import_from_stmt>kmip.core.attributes CertificateType<import_from_stmt>kmip.core enums<import_from_stmt>kmip.core.enums Tags<import_from_stmt>kmip.core exceptions<import_from_stmt>kmip.core.misc CertificateValue<import_from_stmt>kmip.core objects<import_from_stmt>kmip.core.objects Attribute<import_from_stmt>kmip.core.objects KeyBlock<import_from_stmt>kmip.core primitives<import_from_stmt>kmip.core.primitives Struct<import_from_stmt>kmip.core.primitives Enumeration<import_from_stmt>kmip.core.primitives ByteString<import_from_stmt>kmip.core utils<import_from_stmt>kmip.core.utils BytearrayStream<line_sep># 2.2
# 2.2.1
<class_stmt>Certificate(Struct)<block_start>"""
A structure representing a DER-encoded X.509 public key certificate.
See Section 2.2.1 of the KMIP 1.1 specification for more information.
Attributes:
certificate_type: The type of the certificate.
certificate_value: The bytes of the certificate.
"""<def_stmt>__init__ self certificate_type=<none> certificate_value=<none><block_start>"""
Construct a Certificate object.
Args:
certificate_type (CertificateType): The type of the
certificate. Optional, defaults to None.
certificate_value (bytes): The bytes of the certificate. Optional,
defaults to None.
"""<line_sep>super(Certificate self).__init__(Tags.CERTIFICATE)<if_stmt>certificate_type<is><none><block_start>self.certificate_type=CertificateType()<block_end><else_stmt><block_start>self.certificate_type=CertificateType(certificate_type)<block_end><if_stmt>certificate_value<is><none><block_start>self.certificate_value=CertificateValue()<block_end><else_stmt><block_start>self.certificate_value=CertificateValue(certificate_value)<block_end><block_end><def_stmt>read self istream kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>"""
Read the data encoding the Certificate object and decode it into its
constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""<line_sep>super(Certificate self).read(istream kmip_version=kmip_version)<line_sep>tstream=BytearrayStream(istream.read(self.length))<line_sep>self.certificate_type=CertificateType()<line_sep>self.certificate_value=CertificateValue()<line_sep>self.certificate_type.read(tstream kmip_version=kmip_version)<line_sep>self.certificate_value.read(tstream kmip_version=kmip_version)<line_sep>self.is_oversized(tstream)<block_end><def_stmt>write self ostream kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>"""
Write the data encoding the Certificate object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""<line_sep>tstream=BytearrayStream()<line_sep>self.certificate_type.write(tstream kmip_version=kmip_version)<line_sep>self.certificate_value.write(tstream kmip_version=kmip_version)<line_sep>self.length=tstream.length()<line_sep>super(Certificate self).write(ostream kmip_version=kmip_version)<line_sep>ostream.write(tstream.buffer)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>isinstance(other Certificate)<block_start><if_stmt>self.certificate_type<ne>other.certificate_type<block_start><return><false><block_end><elif_stmt>self.certificate_value<ne>other.certificate_value<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end><else_stmt><block_start><return>NotImplemented<block_end><block_end><def_stmt>__ne__ self other<block_start><if_stmt>isinstance(other Certificate)<block_start><return><not>(self<eq>other)<block_end><else_stmt><block_start><return>NotImplemented<block_end><block_end><def_stmt>__repr__ self<block_start><return>"{0}(certificate_type={1}, certificate_value=b'{2}')".format(type(self).__name__ str(self.certificate_type) str(self.certificate_value))<block_end><def_stmt>__str__ self<block_start><return>"{0}".format(str(self.certificate_value))<block_end><block_end># 2.2.2
<class_stmt>KeyBlockKey(Struct)<block_start><def_stmt>__init__ self key_block=<none> tag=Tags.DEFAULT<block_start>super(KeyBlockKey self).__init__(tag)<line_sep>self.key_block=key_block<line_sep>self.validate()<block_end><def_stmt>read self istream kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>super(KeyBlockKey self).read(istream kmip_version=kmip_version)<line_sep>tstream=BytearrayStream(istream.read(self.length))<line_sep>self.key_block=KeyBlock()<line_sep>self.key_block.read(tstream kmip_version=kmip_version)<line_sep>self.is_oversized(tstream)<line_sep>self.validate()<block_end><def_stmt>write self ostream kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>tstream=BytearrayStream()<line_sep>self.key_block.write(tstream kmip_version=kmip_version)<line_sep># Write the length and value of the template attribute
self.length=tstream.length()<line_sep>super(KeyBlockKey self).write(ostream kmip_version=kmip_version)<line_sep>ostream.write(tstream.buffer)<block_end><def_stmt>validate self<block_start>self.__validate()<block_end><def_stmt>__validate self# TODO (peter-hamilton) Finish implementation.
<block_start><pass><block_end><block_end><class_stmt>SymmetricKey(KeyBlockKey)<block_start><def_stmt>__init__ self key_block=<none><block_start>super(SymmetricKey self).__init__(key_block Tags.SYMMETRIC_KEY)<line_sep>self.validate()<block_end><def_stmt>validate self<block_start>self.__validate()<block_end><def_stmt>__validate self# TODO (peter-hamilton) Finish implementation.
<block_start><pass><block_end><block_end># 2.2.3
<class_stmt>PublicKey(KeyBlockKey)<block_start><def_stmt>__init__ self key_block=<none><block_start>super(PublicKey self).__init__(key_block Tags.PUBLIC_KEY)<line_sep>self.validate()<block_end><def_stmt>validate self<block_start>self.__validate()<block_end><def_stmt>__validate self# TODO (peter-hamilton) Finish implementation.
<block_start><pass><block_end><block_end># 2.2.4
<class_stmt>PrivateKey(KeyBlockKey)<block_start><def_stmt>__init__ self key_block=<none><block_start>super(PrivateKey self).__init__(key_block Tags.PRIVATE_KEY)<line_sep>self.validate()<block_end><def_stmt>validate self<block_start>self.__validate()<block_end><def_stmt>__validate self# TODO (peter-hamilton) Finish implementation.
<block_start><pass><block_end><block_end><class_stmt>SplitKey(primitives.Struct)<block_start>"""
A split key cryptographic object.
This object represents a symmetric or private key that has been split into
multiple parts. The fields of this object specify how the key was split
and how it can be reassembled.
Attributes:
split_key_parts: The total number of parts of the split key.
key_part_identifier: The ID specifying the part of the key in the key
block.
split_key_threshold: The minimum number of parts needed to reconstruct
the key.
split_key_method: The method by which the key was split.
prime_field_size: The prime field size used for the Polynomial Sharing
Prime Field split key method.
key_block: The split key part held by this object.
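For example, a 3-of-5 split would use split_key_parts=5 and
split_key_threshold=3, with each part carrying a distinct key_part_identifier.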
"""<def_stmt>__init__ self split_key_parts=<none> key_part_identifier=<none> split_key_threshold=<none> split_key_method=<none> prime_field_size=<none> key_block=<none><block_start>"""
Construct a SplitKey object.
Args:
split_key_parts (int): An integer specifying the total number of
parts of the split key. Optional, defaults to None. Required
for read/write.
key_part_identifier (int): An integer specifying which key part is
contained in the key block. Optional, defaults to None.
Required for read/write.
split_key_threshold (int): An integer specifying the minimum number
of key parts required to reconstruct the split key. Optional,
defaults to None. Required for read/write.
split_key_method (enum): A SplitKeyMethod enumeration specifying
the method by which the key was split. Optional, defaults to
None. Required for read/write.
prime_field_size (int): A big integer specifying the prime field
size used for the Polynomial Sharing Prime Field split key
method. Optional, defaults to None. Required for read/write
only if the split key method is Polynomial Sharing Prime Field.
key_block (struct): A KeyBlock structure containing the split key
part identified by the key part identifier. Optional, defaults
to None. Required for read/write.
"""<line_sep>super(SplitKey self).__init__(enums.Tags.SPLIT_KEY)<line_sep>self._split_key_parts=<none><line_sep>self._key_part_identifier=<none><line_sep>self._split_key_threshold=<none><line_sep>self._split_key_method=<none><line_sep>self._prime_field_size=<none><line_sep>self._key_block=<none><line_sep>self.split_key_parts=split_key_parts<line_sep>self.key_part_identifier=key_part_identifier<line_sep>self.split_key_threshold=split_key_threshold<line_sep>self.split_key_method=split_key_method<line_sep>self.prime_field_size=prime_field_size<line_sep>self.key_block=key_block<block_end>@property<def_stmt>split_key_parts self<block_start><if_stmt>self._split_key_parts<is><not><none><block_start><return>self._split_key_parts.value<block_end><return><none><block_end>@split_key_parts.setter<def_stmt>split_key_parts self value<block_start><if_stmt>value<is><none><block_start>self._split_key_parts=<none><block_end><elif_stmt>isinstance(value six.integer_types)<block_start>self._split_key_parts=primitives.Integer(value=value tag=enums.Tags.SPLIT_KEY_PARTS)<block_end><else_stmt><block_start><raise>TypeError("The split key parts must be an integer.")<block_end><block_end>@property<def_stmt>key_part_identifier self<block_start><if_stmt>self._key_part_identifier<is><not><none><block_start><return>self._key_part_identifier.value<block_end><return><none><block_end>@key_part_identifier.setter<def_stmt>key_part_identifier self value<block_start><if_stmt>value<is><none><block_start>self._key_part_identifier=<none><block_end><elif_stmt>isinstance(value six.integer_types)<block_start>self._key_part_identifier=primitives.Integer(value=value tag=enums.Tags.KEY_PART_IDENTIFIER)<block_end><else_stmt><block_start><raise>TypeError("The key part identifier must be an integer.")<block_end><block_end>@property<def_stmt>split_key_threshold self<block_start><if_stmt>self._split_key_threshold<is><not><none><block_start><return>self._split_key_threshold.value<block_end><return><none><block_end>@split_key_threshold.setter<def_stmt>split_key_threshold self value<block_start><if_stmt>value<is><none><block_start>self._split_key_threshold=<none><block_end><elif_stmt>isinstance(value six.integer_types)<block_start>self._split_key_threshold=primitives.Integer(value=value tag=enums.Tags.SPLIT_KEY_THRESHOLD)<block_end><else_stmt><block_start><raise>TypeError("The split key threshold must be an integer.")<block_end><block_end>@property<def_stmt>split_key_method self<block_start><if_stmt>self._split_key_method<is><not><none><block_start><return>self._split_key_method.value<block_end><return><none><block_end>@split_key_method.setter<def_stmt>split_key_method self value<block_start><if_stmt>value<is><none><block_start>self._split_key_method=<none><block_end><elif_stmt>isinstance(value enums.SplitKeyMethod)<block_start>self._split_key_method=primitives.Enumeration(enums.SplitKeyMethod value=value tag=enums.Tags.SPLIT_KEY_METHOD)<block_end><else_stmt><block_start><raise>TypeError("The split key method must be a SplitKeyMethod enumeration.")<block_end><block_end>@property<def_stmt>prime_field_size self<block_start><if_stmt>self._prime_field_size<is><not><none><block_start><return>self._prime_field_size.value<block_end><return><none><block_end>@prime_field_size.setter<def_stmt>prime_field_size self value<block_start><if_stmt>value<is><none><block_start>self._prime_field_size=<none><block_end><elif_stmt>isinstance(value six.integer_types)<block_start>self._prime_field_size=primitives.BigInteger(value=value 
tag=enums.Tags.PRIME_FIELD_SIZE)<block_end><else_stmt><block_start><raise>TypeError("The prime field size must be an integer.")<block_end><block_end>@property<def_stmt>key_block self<block_start><if_stmt>self._key_block<is><not><none><block_start><return>self._key_block<block_end><return><none><block_end>@key_block.setter<def_stmt>key_block self value<block_start><if_stmt>value<is><none><block_start>self._key_block=<none><block_end><elif_stmt>isinstance(value objects.KeyBlock)<block_start>self._key_block=value<block_end><else_stmt><block_start><raise>TypeError("The key block must be a KeyBlock structure.")<block_end><block_end><def_stmt>read self input_buffer kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>"""
Read the data encoding the SplitKey object and decode it.
Args:
input_buffer (stream): A data stream containing the encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""<line_sep>super(SplitKey self).read(input_buffer kmip_version=kmip_version)<line_sep>local_buffer=utils.BytearrayStream(input_buffer.read(self.length))<if_stmt>self.is_tag_next(enums.Tags.SPLIT_KEY_PARTS local_buffer)<block_start>self._split_key_parts=primitives.Integer(tag=enums.Tags.SPLIT_KEY_PARTS)<line_sep>self._split_key_parts.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidKmipEncoding("The SplitKey encoding is missing the SplitKeyParts field.")<block_end><if_stmt>self.is_tag_next(enums.Tags.KEY_PART_IDENTIFIER local_buffer)<block_start>self._key_part_identifier=primitives.Integer(tag=enums.Tags.KEY_PART_IDENTIFIER)<line_sep>self._key_part_identifier.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidKmipEncoding("The SplitKey encoding is missing the KeyPartIdentifier field.")<block_end><if_stmt>self.is_tag_next(enums.Tags.SPLIT_KEY_THRESHOLD local_buffer)<block_start>self._split_key_threshold=primitives.Integer(tag=enums.Tags.SPLIT_KEY_THRESHOLD)<line_sep>self._split_key_threshold.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidKmipEncoding("The SplitKey encoding is missing the SplitKeyThreshold field.")<block_end><if_stmt>self.is_tag_next(enums.Tags.SPLIT_KEY_METHOD local_buffer)<block_start>self._split_key_method=primitives.Enumeration(enums.SplitKeyMethod tag=enums.Tags.SPLIT_KEY_METHOD)<line_sep>self._split_key_method.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidKmipEncoding("The SplitKey encoding is missing the SplitKeyMethod field.")<block_end><if_stmt>self.is_tag_next(enums.Tags.PRIME_FIELD_SIZE local_buffer)<block_start>self._prime_field_size=primitives.BigInteger(tag=enums.Tags.PRIME_FIELD_SIZE)<line_sep>self._prime_field_size.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start>corner_case=enums.SplitKeyMethod.POLYNOMIAL_SHARING_PRIME_FIELD<if_stmt>self.split_key_method<eq>corner_case<block_start><raise>exceptions.InvalidKmipEncoding("The SplitKey encoding is missing the PrimeFieldSize "<concat>"field. This field is required when the SplitKeyMethod is "<concat>"PolynomialSharingPrimeField.")<block_end><block_end><if_stmt>self.is_tag_next(enums.Tags.KEY_BLOCK local_buffer)<block_start>self._key_block=objects.KeyBlock()<line_sep>self._key_block.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidKmipEncoding("The SplitKey encoding is missing the KeyBlock field.")<block_end>self.is_oversized(local_buffer)<block_end><def_stmt>write self output_buffer kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>"""
Write the data encoding the SplitKey object to a buffer.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""<line_sep>local_buffer=utils.BytearrayStream()<if_stmt>self._split_key_parts<block_start>self._split_key_parts.write(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidField("The SplitKey object is missing the SplitKeyParts field.")<block_end><if_stmt>self._key_part_identifier<block_start>self._key_part_identifier.write(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidField("The SplitKey object is missing the KeyPartIdentifier field.")<block_end><if_stmt>self._split_key_threshold<block_start>self._split_key_threshold.write(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidField("The SplitKey object is missing the SplitKeyThreshold field.")<block_end><if_stmt>self._split_key_method<block_start>self._split_key_method.write(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidField("The SplitKey object is missing the SplitKeyMethod field.")<block_end><if_stmt>self._prime_field_size<block_start>self._prime_field_size.write(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start>corner_case=enums.SplitKeyMethod.POLYNOMIAL_SHARING_PRIME_FIELD<if_stmt>self.split_key_method<eq>corner_case<block_start><raise>exceptions.InvalidField("The SplitKey object is missing the PrimeFieldSize field. "<concat>"This field is required when the SplitKeyMethod is "<concat>"PolynomialSharingPrimeField.")<block_end><block_end><if_stmt>self._key_block<block_start>self._key_block.write(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidField("The SplitKey object is missing the KeyBlock field.")<block_end>self.length=local_buffer.length()<line_sep>super(SplitKey self).write(output_buffer kmip_version=kmip_version)<line_sep>output_buffer.write(local_buffer.buffer)<block_end><def_stmt>__repr__ self<block_start>args=["split_key_parts={}".format(repr(self.split_key_parts)) "key_part_identifier={}".format(repr(self.key_part_identifier)) "split_key_threshold={}".format(repr(self.split_key_threshold)) "split_key_method={}".format(self.split_key_method) "prime_field_size={}".format(repr(self.prime_field_size)) "key_block={}".format(repr(self.key_block))]<line_sep><return>"SplitKey({})".format(", ".join(args))<block_end><def_stmt>__str__ self# TODO (peter-hamilton) Replace str() call below with a dict() call.
<block_start>value=", ".join(['"split_key_parts": {}'.format(self.split_key_parts) '"key_part_identifier": {}'.format(self.key_part_identifier) '"split_key_threshold": {}'.format(self.split_key_threshold) '"split_key_method": {}'.format(self.split_key_method) '"prime_field_size": {}'.format(self.prime_field_size) '"key_block": {}'.format(str(self.key_block))])<line_sep><return>"{"+value+"}"<block_end><def_stmt>__eq__ self other<block_start><if_stmt>isinstance(other SplitKey)<block_start><if_stmt>self.split_key_parts<ne>other.split_key_parts<block_start><return><false><block_end><elif_stmt>self.key_part_identifier<ne>other.key_part_identifier<block_start><return><false><block_end><elif_stmt>self.split_key_threshold<ne>other.split_key_threshold<block_start><return><false><block_end><elif_stmt>self.split_key_method<ne>other.split_key_method<block_start><return><false><block_end><elif_stmt>self.prime_field_size<ne>other.prime_field_size<block_start><return><false><block_end># elif self.key_block != other.key_block:
# return False
<return><true><block_end><else_stmt><block_start><return>NotImplemented<block_end><block_end><def_stmt>__ne__ self other<block_start><if_stmt>isinstance(other SplitKey)<block_start><return><not>self.__eq__(other)<block_end><else_stmt><block_start><return>NotImplemented<block_end><block_end><block_end># 2.2.6
<class_stmt>Template(Struct)<block_start><def_stmt>__init__ self attributes=<none><block_start>super(Template self).__init__(Tags.TEMPLATE)<line_sep>self.attributes=attributes<line_sep>self.validate()<block_end><def_stmt>read self istream kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>super(Template self).read(istream kmip_version=kmip_version)<line_sep>tstream=BytearrayStream(istream.read(self.length))<line_sep>self.attributes=list()<line_sep>attribute=Attribute()<line_sep>attribute.read(tstream kmip_version=kmip_version)<line_sep>self.attributes.append(attribute)<while_stmt>self.is_tag_next(Tags.ATTRIBUTE tstream)<block_start>attribute=Attribute()<line_sep>attribute.read(tstream kmip_version=kmip_version)<line_sep>self.attributes.append(attribute)<block_end>self.is_oversized(tstream)<line_sep>self.validate()<block_end><def_stmt>write self ostream kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>tstream=BytearrayStream()<for_stmt>attribute self.attributes<block_start>attribute.write(tstream kmip_version=kmip_version)<block_end># Write the length and value of the template attribute
self.length=tstream.length()<line_sep>super(Template self).write(ostream kmip_version=kmip_version)<line_sep>ostream.write(tstream.buffer)<block_end><def_stmt>validate self<block_start>self.__validate()<block_end><def_stmt>__validate self# TODO (peter-hamilton) Finish implementation.
<block_start><pass><block_end><block_end># 2.2.7
<class_stmt>SecretData(Struct)<block_start><class_stmt>SecretDataType(Enumeration)<block_start><def_stmt>__init__ self value=<none><block_start>super(SecretData.SecretDataType self).__init__(enums.SecretDataType value Tags.SECRET_DATA_TYPE)<block_end><block_end><def_stmt>__init__ self secret_data_type=<none> key_block=<none><block_start>super(SecretData self).__init__(Tags.SECRET_DATA)<line_sep>self.secret_data_type=secret_data_type<line_sep>self.key_block=key_block<line_sep>self.validate()<block_end><def_stmt>read self istream kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>super(SecretData self).read(istream kmip_version=kmip_version)<line_sep>tstream=BytearrayStream(istream.read(self.length))<line_sep>self.secret_data_type=SecretData.SecretDataType()<line_sep>self.key_block=KeyBlock()<line_sep>self.secret_data_type.read(tstream kmip_version=kmip_version)<line_sep>self.key_block.read(tstream kmip_version=kmip_version)<line_sep>self.is_oversized(tstream)<line_sep>self.validate()<block_end><def_stmt>write self ostream kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>tstream=BytearrayStream()<line_sep>self.secret_data_type.write(tstream kmip_version=kmip_version)<line_sep>self.key_block.write(tstream kmip_version=kmip_version)<line_sep># Write the length and value of the template attribute
self.length=tstream.length()<line_sep>super(SecretData self).write(ostream kmip_version=kmip_version)<line_sep>ostream.write(tstream.buffer)<block_end><def_stmt>validate self<block_start>self.__validate()<block_end><def_stmt>__validate self# TODO (peter-hamilton) Finish implementation.
<block_start><pass><block_end><block_end># 2.2.8
<class_stmt>OpaqueObject(Struct)<block_start><class_stmt>OpaqueDataType(Enumeration)<block_start><def_stmt>__init__ self value=<none><block_start>super(OpaqueObject.OpaqueDataType self).__init__(enums.OpaqueDataType value Tags.OPAQUE_DATA_TYPE)<block_end><block_end><class_stmt>OpaqueDataValue(ByteString)<block_start><def_stmt>__init__ self value=<none><block_start>super(OpaqueObject.OpaqueDataValue self).__init__(value Tags.OPAQUE_DATA_VALUE)<block_end><block_end><def_stmt>__init__ self opaque_data_type=<none> opaque_data_value=<none><block_start>super(OpaqueObject self).__init__(Tags.OPAQUE_OBJECT)<line_sep>self.opaque_data_type=opaque_data_type<line_sep>self.opaque_data_value=opaque_data_value<line_sep>self.validate()<block_end><def_stmt>read self istream kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>super(OpaqueObject self).read(istream kmip_version=kmip_version)<line_sep>tstream=BytearrayStream(istream.read(self.length))<line_sep>self.opaque_data_type=OpaqueObject.OpaqueDataType()<line_sep>self.opaque_data_value=OpaqueObject.OpaqueDataValue()<line_sep>self.opaque_data_type.read(tstream kmip_version=kmip_version)<line_sep>self.opaque_data_value.read(tstream kmip_version=kmip_version)<line_sep>self.is_oversized(tstream)<line_sep>self.validate()<block_end><def_stmt>write self ostream kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>tstream=BytearrayStream()<line_sep>self.opaque_data_type.write(tstream kmip_version=kmip_version)<line_sep>self.opaque_data_value.write(tstream kmip_version=kmip_version)<line_sep># Write the length and value of the template attribute
self.length=tstream.length()<line_sep>super(OpaqueObject self).write(ostream kmip_version=kmip_version)<line_sep>ostream.write(tstream.buffer)<block_end><def_stmt>validate self<block_start>self.__validate()<block_end><def_stmt>__validate self# TODO (peter-hamilton) Finish implementation.
<block_start><pass><block_end><block_end>
|
<import_from_stmt>easydict EasyDict<line_sep>conv1d_config=dict(feature_embedding=dict(player=dict(input_dim=36 output_dim=64 ) ball=dict(input_dim=18 output_dim=64 ) left_team=dict(input_dim=7 output_dim=48 conv1d_output_channel=36 fc_output_dim=96 ) right_team=dict(input_dim=7 output_dim=48 conv1d_output_channel=36 fc_output_dim=96 ) left_closest=dict(input_dim=7 output_dim=48 ) right_closest=dict(input_dim=7 output_dim=48 )) fc_cat=dict(input_dim=416 ) lstm_size=256 policy_head=dict(input_dim=256 hidden_dim=164 act_shape=19 ) value_head=dict(input_dim=256 hidden_dim=164 output_dim=1) )<line_sep>conv1d_default_config=EasyDict(conv1d_config)<line_sep>
|
<def_stmt>dummy_test <block_start><pass><block_end>
|
"""Centralize non-Flask code for 2020 User Geography data aggregation here.
This file serves both as a library for the Flask app and as
a bootstrap for Celery tasks, which could be run with something like
celery -A census_extractomatic.user_geo:celery_app worker
"""<import_from_stmt>datetime timedelta<import_from_stmt>sqlalchemy.sql text<import_stmt>json<import_from_stmt>collections OrderedDict<import_from_stmt>copy deepcopy<import_from_stmt>tempfile NamedTemporaryFile<import_stmt>zipfile<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_stmt>ogr<import_from_stmt>celery Celery<import_stmt>os<import_from_stmt>sqlalchemy create_engine<import_stmt>boto3<import_from_stmt>botocore.exceptions ClientError<import_stmt>logging<line_sep>logger=logging.getLogger('gunicorn.error')<import_from_stmt>timeit default_timer<as>timer<line_sep>SQLALCHEMY_DATABASE_URI=os.environ.get('DATABASE_URL')<line_sep>CELERY_BROKER=os.environ['REDIS_URL']<line_sep>celery_app=Celery(__name__ broker=CELERY_BROKER)<line_sep>celery_db=create_engine(SQLALCHEMY_DATABASE_URI)<line_sep>@celery_app.task<def_stmt>join_user_geo_to_blocks_task user_geodata_id<block_start>join_user_to_census(celery_db user_geodata_id)<block_end>COMPARISON_RELEASE_CODE='dec_pl94_compare_2020_2010'<line_sep>USER_GEODATA_INSERT_SQL=text("""
INSERT INTO aggregation.user_geodata (name, hash_digest, source_url, public, fields, bbox)
VALUES (:name, :hash_digest, :source_url, :public, :fields, ST_MakeEnvelope(:xmin, :ymin, :xmax, :ymax, 4326))
RETURNING *
""")<line_sep>USER_GEODATA_GEOMETRY_INSERT_SQL=text("""
INSERT INTO aggregation.user_geodata_geometry (user_geodata_id, geom, name, original_id, properties)
VALUES (:user_geodata_id,
ST_Transform(
ST_GeomFromText(:geom_wkt,:epsg),
4326),
:name,
:original_id,
:properties
)
""")<line_sep>USER_GEODATA_SELECT_BY_HASH_DIGEST=text('''
SELECT user_geodata_id,
EXTRACT(EPOCH from created_at) unix_timestamp,
name,
bbox,
fields,
source_url,
status,
notes_html,
public
FROM aggregation.user_geodata
WHERE hash_digest=:hash_digest
''')<line_sep>AGGREGATE_BLOCKS_2010_SQL=text("""
INSERT INTO aggregation.user_geodata_blocks_2010 (user_geodata_geometry_id, geoid)
SELECT ugg.user_geodata_geometry_id, b.geoid10
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg,
blocks.tabblock10 b
WHERE ug.user_geodata_id = :geodata_id
AND ug.user_geodata_id = ugg.user_geodata_id
AND ST_Intersects(ug.bbox, b.geom)
AND ST_Contains(ugg.geom,
ST_SetSRID(ST_MakePoint(b.intptlon10::double precision,
b.intptlat10::double precision),
4326))
""")<line_sep>AGGREGATE_BLOCKS_2020_SQL=text("""
INSERT INTO aggregation.user_geodata_blocks_2020 (user_geodata_geometry_id, geoid)
SELECT ugg.user_geodata_geometry_id, b.geoid20
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg,
blocks.tabblock20 b
WHERE ug.user_geodata_id = :geodata_id
AND ug.user_geodata_id = ugg.user_geodata_id
AND ST_Intersects(ug.bbox, b.geom)
AND ST_Contains(ugg.geom,
ST_SetSRID(ST_MakePoint(b.intptlon20::double precision,
b.intptlat20::double precision),
4326))
""")<line_sep>USER_GEOMETRY_SELECT_WITH_GEOM_BY_HASH_DIGEST=text('''
SELECT ugg.user_geodata_geometry_id, ugg.name, ugg.original_id, ST_asGeoJSON(ST_ForcePolygonCCW(ugg.geom))
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg
WHERE ug.hash_digest=:hash_digest
AND ug.user_geodata_id = ugg.user_geodata_id
''')<line_sep>USER_GEOMETRY_SELECT_2020_BLOCKS_WITH_GEOM_BY_HASH_DIGEST=text('''
SELECT ug.name upload_name,
ugb.geoid,
ugg.user_geodata_geometry_id cr_geoid,
ugg.name,
ugg.original_id,
g.pop100,
g.hu100,
g.state || g.place as state_place_fips,
ST_asGeoJSON(ST_ForcePolygonCCW(b.geom)) geom
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg,
aggregation.user_geodata_blocks_2020 ugb,
dec2020_pl94.geoheader g,
blocks.tabblock20 b
WHERE ug.hash_digest=:hash_digest
AND ug.user_geodata_id = ugg.user_geodata_id
AND ugg.user_geodata_geometry_id = ugb.user_geodata_geometry_id
AND ugb.geoid = b.geoid20
AND b.geoid20 = g.geoid
''')<line_sep>USER_GEOMETRY_SELECT_2010_BLOCKS_WITH_GEOM_BY_HASH_DIGEST=text('''
SELECT ug.name upload_name,
ugb.geoid,
ugg.user_geodata_geometry_id cr_geoid,
ugg.name,
ugg.original_id,
g.pop100,
g.hu100,
g.state || g.place as state_place_fips,
ST_asGeoJSON(ST_ForcePolygonCCW(b.geom)) geom
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg,
aggregation.user_geodata_blocks_2010 ugb,
dec2010_pl94.geoheader g,
blocks.tabblock10 b
WHERE ug.hash_digest=:hash_digest
AND ug.user_geodata_id = ugg.user_geodata_id
AND ugg.user_geodata_geometry_id = ugb.user_geodata_geometry_id
AND ugb.geoid = b.geoid10
AND b.geoid10 = g.geoid
''')<line_sep>BLOCK_VINTAGE_TABLES={'dec2010_pl94':'user_geodata_blocks_2010' 'dec2020_pl94':'user_geodata_blocks_2020'}<line_sep>SELECT_BY_USER_GEOGRAPHY_SQL_TEMPLATE="""
SELECT ugg.user_geodata_geometry_id,
ugg.name,
ugg.original_id,
ST_asGeoJSON(ST_ForcePolygonCCW(ugg.geom)) geom,
d.*
FROM aggregation.user_geodata ug,
aggregation.user_geodata_geometry ugg,
aggregation.{blocks_vintage_table} ugb,
{schema}.{table_code} d
WHERE ug.hash_digest = :hash_digest
AND ug.user_geodata_id = ugg.user_geodata_id
AND ugg.user_geodata_geometry_id = ugb.user_geodata_geometry_id
AND ugb.geoid = d.geoid
"""<def_stmt>fetch_user_geodata db hash_digest<block_start><with_stmt>db.engine.begin()<as>con<block_start>cur=con.execute(USER_GEODATA_SELECT_BY_HASH_DIGEST hash_digest=hash_digest)<line_sep>keys=list(cur._metadata.keys)<line_sep>row=cur.first()<if_stmt>row<block_start><return>dict(zip(keys row))<block_end><block_end><return><none><block_end><def_stmt>_fieldsFromOGRLayer layer<block_start>fields=[]<line_sep>ldefn=layer.GetLayerDefn()<for_stmt>n range(ldefn.GetFieldCount())<block_start>fdefn=ldefn.GetFieldDefn(n)<line_sep>fields.append(fdefn.name)<block_end><return>fields<block_end><def_stmt>save_user_geojson db geojson_str hash_digest dataset_name name_field id_field source_url share_checked<block_start>tmp=NamedTemporaryFile('w' suffix='.json' delete=<false>)<line_sep>tmp.write(geojson_str)<line_sep>tmp.close()<line_sep>ogr_file=ogr.Open(tmp.name)<if_stmt>ogr_file<is><none><block_start><raise>ValueError(f"ogr.Open failed for {tmp.name}")<block_end># assume geojson always has one layer, right?
l=ogr_file.GetLayer(0)<line_sep>epsg=l.GetSpatialRef().GetAuthorityCode(<none>)<line_sep>(xmin xmax ymin ymax)=l.GetExtent()<line_sep>dataset_id=<none><line_sep>fields=_fieldsFromOGRLayer(l)<with_stmt>db.engine.begin()<as>con<block_start>cur=con.execute(USER_GEODATA_INSERT_SQL name=dataset_name hash_digest=hash_digest source_url=source_url public=share_checked fields=json.dumps(fields) xmin=xmin ymin=ymin xmax=xmax ymax=ymax)<line_sep>dataset_id=cur.fetchall()[0][0]<for_stmt>i range(0 l.GetFeatureCount())<block_start>f=l.GetFeature(i)<line_sep>mp=ogr.ForceToMultiPolygon(f.GetGeometryRef())<line_sep>properties=dict((fld f.GetField(i))<for>i,fld enumerate(fields))<line_sep>con.execute(USER_GEODATA_GEOMETRY_INSERT_SQL user_geodata_id=dataset_id geom_wkt=mp.ExportToWkt() epsg=epsg name=properties.get(name_field) original_id=properties.get(id_field) properties=json.dumps(properties))<block_end><block_end><if_stmt>dataset_id<is><not><none><block_start>join_user_geo_to_blocks_task.delay(dataset_id)<block_end><return>dataset_id<block_end><def_stmt>list_user_geographies db<block_start>cur=db.engine.execute('select *, st_asGeoJSON(bbox) bbox_json from aggregation.user_geodata where public = true order by name')<line_sep>results=[]<for_stmt>row cur<block_start>d=dict(row)<line_sep>bbox_json=d.pop('bbox_json')<line_sep># parse JSON string and get rid of binary bbox
<if_stmt>bbox_json<block_start>d['bbox']=json.loads(bbox_json)<block_end><else_stmt><block_start><del_stmt>d['bbox']<block_end>results.append(d)<block_end><return>results<block_end><def_stmt>join_user_to_census db user_geodata_id<block_start>"""Waffling a little on structure but this provides a single transaction-protected function which computes block joins
for all user geographies associated with a specified user geo dataset, including clearing out anything which
might have been there (shouldn't really be) and managing the status.
"""<line_sep># first set the status in its own transaction so that it serves as a sign that the work is happening.
# we may want to check the status to make sure it isn't already processing to avoid overlapping jobs
# although the delete statements should mean that isn't a terrible problem, just a longer CPU load
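# overall flow: mark the upload PROCESSING, clear any existing 2010/2020 block assignments, recompute both joins, then mark it READY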
db.engine.execute(text("UPDATE aggregation.user_geodata SET status = 'PROCESSING' where user_geodata_id = :geodata_id") geodata_id=user_geodata_id)<with_stmt>db.engine.begin()<as>con<block_start>con.execute(text("""
DELETE FROM aggregation.user_geodata_blocks_2010
WHERE user_geodata_geometry_id in
(SELECT user_geodata_geometry_id FROM aggregation.user_geodata_geometry
WHERE user_geodata_id=:geodata_id)""") geodata_id=user_geodata_id)<line_sep>con.execute(text("""
DELETE FROM aggregation.user_geodata_blocks_2020
WHERE user_geodata_geometry_id in
(SELECT user_geodata_geometry_id FROM aggregation.user_geodata_geometry
WHERE user_geodata_id=:geodata_id)""") geodata_id=user_geodata_id)<line_sep>con.execute(AGGREGATE_BLOCKS_2010_SQL geodata_id=user_geodata_id)<line_sep>con.execute(AGGREGATE_BLOCKS_2020_SQL geodata_id=user_geodata_id)<line_sep>db.engine.execute(text("UPDATE aggregation.user_geodata SET status = 'READY' where user_geodata_id = :geodata_id") geodata_id=user_geodata_id)<block_end><block_end><def_stmt>_blankFeatureCollection <block_start><return>{"type":"FeatureCollection" "features":[]}<block_end><def_stmt>fetch_user_geog_as_geojson db hash_digest<block_start>geojson=_blankFeatureCollection()<line_sep>cur=db.engine.execute(USER_GEOMETRY_SELECT_WITH_GEOM_BY_HASH_DIGEST hash_digest=hash_digest)<if_stmt>cur.rowcount<eq>0<block_start><raise>ValueError(f"Invalid geography ID {hash_digest}")<block_end><for_stmt>cr_geoid,name,original_id,geojson_str cur<block_start>base={'type':'Feature'}<line_sep>base['geometry']=json.loads(geojson_str)<line_sep>base['properties']={'cr_geoid':cr_geoid}<if_stmt>name<is><not><none><block_start>base['properties']['name']=name<block_end><if_stmt>original_id<is><not><none><block_start>base['properties']['original_id']=original_id<line_sep>base['id']=original_id<block_end>geojson['features'].append(base)<block_end><return>geojson<block_end>USER_BLOCKS_BY_HASH_DIGEST_SQL={'2020':USER_GEOMETRY_SELECT_2020_BLOCKS_WITH_GEOM_BY_HASH_DIGEST '2010':USER_GEOMETRY_SELECT_2010_BLOCKS_WITH_GEOM_BY_HASH_DIGEST}<def_stmt>fetch_metadata release=<none> table_code=<none># for now we'll just do it from literal objects here but deepcopy them so we don't get messed up
# maybe later we'll make a metadata schema in the database
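# e.g. fetch_metadata(release='dec2020_pl94', table_code='p1') returns a deep copy of the P1 metadata;
# the comparison release instead expands each column into _2020, _2010 and _pct_chg variants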
<block_start><if_stmt>table_code<is><none><block_start><raise>Exception('Table code must be specified for metadata fetch')<block_end>md=METADATA.get(table_code.lower())<if_stmt>md<block_start><if_stmt>release<is><none><or>release<in>md['releases']<block_start><return>deepcopy(md)<block_end><if_stmt>release<eq>COMPARISON_RELEASE_CODE<block_start>c_10=[]<line_sep>c_20=[]<line_sep>c_change=[]<line_sep>base=deepcopy(md)<for_stmt>col,label md['columns'].items()<block_start>c_10.append((f"{col}_2010" f"{label} (2010)"))<line_sep>c_20.append((f"{col}_2020" f"{label} (2020)"))<line_sep>c_change.append((f"{col}_pct_chg" f"{label} (% change)"))<line_sep>base['columns']=OrderedDict(c_20+c_10+c_change)<block_end><return>base<block_end><block_end><return><none><block_end><def_stmt>evaluateUserGeographySQLTemplate schema table_code<block_start>"""Schemas and table names can't be handled as bindparams with SQLAlchemy, so
this allows us to use a 'select *' syntax for multiple tables.
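For example, ('dec2020_pl94', 'p1') produces a query against dec2020_pl94.p1
joined through aggregation.user_geodata_blocks_2020.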
"""<try_stmt><block_start>blocks_vintage_table=BLOCK_VINTAGE_TABLES[schema]<block_end><except_stmt>KeyError<block_start><raise>ValueError(f"No blocks vintage identified for given schema {schema}")<block_end><return>SELECT_BY_USER_GEOGRAPHY_SQL_TEMPLATE.format(schema=schema table_code=table_code blocks_vintage_table=blocks_vintage_table)<block_end><def_stmt>aggregate_decennial db hash_digest release table_code<block_start>"""For the given user geography, identified by hash_digest, aggregate the given table
for the given decennial census release, and return a Pandas dataframe with the results.
In addition to the data columns for the given table, the dataframe may include columns
'name' and/or 'original_id', if the user geography identified sources for those in their
upload.
"""<if_stmt>fetch_metadata(release=release table_code=table_code)<block_start>sql=evaluateUserGeographySQLTemplate(release table_code)<line_sep>query=text(sql).bindparams(hash_digest=hash_digest)<line_sep>logger.info(f'aggregate_decennial: starting timer {hash_digest} {release} {table_code}')<line_sep>start=timer()<line_sep>df=pd.read_sql(query db.engine)<line_sep>end=timer()<line_sep>logger.info(f"pd.read_sql {hash_digest} {release} {table_code} elapsed time {timedelta(seconds=end-start)}")<line_sep>df=df.drop('geoid' axis=1)# we don't care about the original blocks after we groupby
agg_funcs=dict((c 'sum')<for>c df.columns[1:])<line_sep>agg_funcs['name']='first'# these string values are
agg_funcs['original_id']='first'# the same for each row aggregated
agg_funcs['geom']='first'# by 'user_geodata_geometry_id'
aggd=df.groupby('user_geodata_geometry_id').agg(agg_funcs)<for_stmt>c ['name' 'original_id']<block_start><if_stmt>aggd[c].isnull().all()<block_start>aggd=aggd.drop(c axis=1)<block_end><block_end>aggd=aggd.reset_index()<line_sep>end=timer()<line_sep>logger.info(f"all processing {hash_digest} {release} {table_code} total elapsed time {timedelta(seconds=end-start)}")<line_sep><return>aggd<block_end><raise>ValueError('Invalid release or table code')<block_end><def_stmt>aggregate_decennial_comparison db hash_digest table_code<block_start>agg_2020=aggregate_decennial(db hash_digest 'dec2020_pl94' table_code).set_index('user_geodata_geometry_id')<line_sep>agg_2010=aggregate_decennial(db hash_digest 'dec2010_pl94' table_code).set_index('user_geodata_geometry_id')<line_sep># not all uploads have all columns, so be responsive to the data
label_cols=[]<for_stmt>c ['name' 'original_id' 'geom']<block_start><if_stmt>c<in>agg_2020<block_start>label_cols.append(c)<block_end><block_end>label_df=agg_2020[label_cols]<line_sep>agg_2020=agg_2020.drop(label_cols axis=1)<line_sep>agg_2010=agg_2010.drop(label_cols axis=1)<line_sep>pct_chg=(agg_2020-agg_2010)/agg_2010<line_sep>joined=agg_2020.join(agg_2010 lsuffix='_2020' rsuffix='_2010')<line_sep>joined=joined.join(pct_chg.rename(columns=<lambda>x:f"{x}_change"))<line_sep><return>label_df.join(joined).reset_index()<block_end><def_stmt>dataframe_to_feature_collection df:pd.DataFrame geom_col<block_start>"""Given a Pandas dataframe with one column stringified GeoJSON, return a
dict representing a GeoJSON FeatureCollection, where `geom_col` is parsed and
used for the 'geometry' and the rest of the row is converted to a 'properties' dict."""<line_sep>geojson={"type":"FeatureCollection" "features":[]}<for_stmt>_,row df.iterrows()<block_start>row=row.to_dict()<line_sep>geom=row.pop(geom_col)<line_sep>f={'type':'Feature' 'geometry':json.loads(geom) 'properties':row}<if_stmt>'original_id'<in>row<block_start>f['id']=row['original_id']<block_end>geojson['features'].append(f)<block_end><return>geojson<block_end><def_stmt>create_block_xref_download db hash_digest year<block_start><try_stmt><block_start>sql=USER_BLOCKS_BY_HASH_DIGEST_SQL[str(year)]<block_end><except_stmt>KeyError<block_start><raise>ValueError(f"Invalid year {year}")<block_end>df=pd.read_sql(sql.bindparams(hash_digest=hash_digest) db.engine)<line_sep>user_geo_name=str(df['upload_name'].unique().squeeze())<line_sep>df=df.drop('upload_name' axis=1)<line_sep>metadata={'title':f"Census Reporter {year} Block Assignments for {user_geo_name}" 'columns':OrderedDict((('geoid' f'15-character unique block identifier') ('cr_geoid' '''An arbitrary unique identifier for a specific geography (e.g. neighborhood) included in a user uploaded map''') ('name' 'A name for a specific geography included in a user uploaded map, if available') ('original_id' 'A unique identifier for a specific geography included in a user uploaded map, from the original source, if available') ('pop100' f'The total population for the given block (Decennial Census {year})') ('hu100' f'The total housing units (occupied or vacant) for the given block (Decennial Census {year})') ('state_place_fips' f'The combined State/Place FIPS code for the given block (Decennial Census {year})') ))}<line_sep>release=f'tiger{year}'<line_sep>table_code='block_assignments'<line_sep>tmp=write_compound_zipfile(hash_digest release table_code df metadata)<line_sep>remote_filename=build_filename(hash_digest year 'block_assignments' 'zip')<line_sep>move_file_to_s3(tmp.name hash_digest remote_filename)<line_sep><return>tmp<block_end><def_stmt>create_aggregate_download db hash_digest release table_code<block_start><if_stmt>release<eq>COMPARISON_RELEASE_CODE<block_start>aggregated=aggregate_decennial_comparison(db hash_digest table_code)<block_end><else_stmt><block_start>aggregated=aggregate_decennial(db hash_digest release table_code)<block_end>metadata=fetch_metadata(release=release table_code=table_code)<if_stmt>'original_id'<in>aggregated# original id is second if its there so insert it first
<block_start>metadata['columns']['original_id']='Geographic Identifier'<line_sep>metadata['columns'].move_to_end('original_id' last=<false>)<block_end><if_stmt>'name'<in>aggregated# name is first if it's there
<block_start>metadata['columns']['name']='Geography Name'<line_sep>metadata['columns'].move_to_end('name' last=<false>)<block_end># the internal cr_geoid is only needed when the upload has neither a name nor an original ID
<if_stmt>'name'<in>aggregated<or>'original_id'<in>aggregated<block_start>aggregated=aggregated.drop('user_geodata_geometry_id' axis=1)<block_end><else_stmt><block_start>aggregated=aggregated.rename(columns={'user_geodata_geometry_id':'cr_geoid'})<line_sep>metadata['columns']['cr_geoid']='Census Reporter Geography ID'<line_sep>metadata['columns'].move_to_end('cr_geoid' last=<false>)<block_end># NaN and inf break JSON serialization, and inf looks bad in CSV too.
# Any column could have NaN, not just the pct_chg columns -- e.g. Atlanta has neighborhoods which get no 2010 blocks
aggregated=aggregated.replace([np.inf -np.inf np.nan] '')<line_sep>tmp=write_compound_zipfile(hash_digest release table_code aggregated metadata)<line_sep>remote_filename=build_filename(hash_digest release table_code 'zip')<line_sep>move_file_to_s3(tmp.name hash_digest remote_filename)<line_sep><return>tmp<block_end><def_stmt>write_compound_zipfile hash_digest release table_code df metadata<block_start>"""Given a dataframe with a 'geom' column,
create a ZipFile with the data from that dataframe
in both CSV and GeoJSON, returning a semi-persistent
temporary file.
"""<with_stmt>NamedTemporaryFile('wb' suffix='.zip' delete=<false>)<as>tmp<block_start><with_stmt>zipfile.ZipFile(tmp 'w' zipfile.ZIP_DEFLATED)<as>zf<block_start>zf.writestr(build_filename(hash_digest release table_code 'csv') df.drop('geom' axis=1).to_csv(index=<false>))<line_sep>zf.writestr(build_filename(hash_digest release table_code 'geojson') json.dumps(dataframe_to_feature_collection(df 'geom')))<line_sep>zf.writestr(f'metadata.json' json.dumps(metadata indent=2))<line_sep>zf.close()<block_end><block_end><return>tmp<block_end><def_stmt>move_file_to_s3 local_filename hash_digest destination_filename<block_start>"""Considered making this a celery task, but don't think the file created on `web` is available on `worker`
so let's wait to see whether we even need the async.
"""<line_sep>s3_client=boto3.client('s3')<try_stmt><block_start>response=s3_client.upload_file(local_filename "files.censusreporter.org" f"aggregation/{hash_digest}/{destination_filename}" ExtraArgs={'ACL':'public-read'})<block_end><except_stmt>ClientError<as>e<block_start>logger.error(e)<line_sep><return><false><block_end><return><true><block_end><def_stmt>build_filename hash_digest release table_code extension<block_start><return>f'{release}_{hash_digest}_{table_code}.{extension}'<block_end>METADATA={'p1':{'title':'Race' 'releases':['dec2010_pl94' 'dec2020_pl94'] 'columns':OrderedDict((('P0010001' 'P1-1: Total') ('P0010002' 'P1-2: Population of one race') ('P0010003' 'P1-3: White alone') ('P0010004' 'P1-4: Black or African American alone') ('P0010005' 'P1-5: American Indian and Alaska Native alone') ('P0010006' 'P1-6: Asian alone') ('P0010007' 'P1-7: Native Hawaiian and Other Pacific Islander alone') ('P0010008' 'P1-8: Some other race alone') ('P0010009' 'P1-9: Population of two or more races') ('P0010010' 'P1-10: Population of two races') ('P0010011' 'P1-11: White; Black or African American') ('P0010012' 'P1-12: White; American Indian and Alaska Native') ('P0010013' 'P1-13: White; Asian') ('P0010014' 'P1-14: White; Native Hawaiian and Other Pacific Islander') ('P0010015' 'P1-15: White; Some other race') ('P0010016' 'P1-16: Black or African American; American Indian and Alaska Native') ('P0010017' 'P1-17: Black or African American; Asian') ('P0010018' 'P1-18: Black or African American; Native Hawaiian and Other Pacific Islander') ('P0010019' 'P1-19: Black or African American; Some other race') ('P0010020' 'P1-20: American Indian and Alaska Native; Asian') ('P0010021' 'P1-21: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0010022' 'P1-22: American Indian and Alaska Native; Some other race') ('P0010023' 'P1-23: Asian; Native Hawaiian and Other Pacific Islander') ('P0010024' 'P1-24: Asian; Some other race') ('P0010025' 'P1-25: Native Hawaiian and Other Pacific Islander; Some other race') ('P0010026' 'P1-26: Population of three races') ('P0010027' 'P1-27: White; Black or African American; American Indian and Alaska Native') ('P0010028' 'P1-28: White; Black or African American; Asian') ('P0010029' 'P1-29: White; Black or African American; Native Hawaiian and Other Pacific Islander') ('P0010030' 'P1-30: White; Black or African American; Some other race') ('P0010031' 'P1-31: White; American Indian and Alaska Native; Asian') ('P0010032' 'P1-32: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0010033' 'P1-33: White; American Indian and Alaska Native; Some other race') ('P0010034' 'P1-34: White; Asian; Native Hawaiian and Other Pacific Islander') ('P0010035' 'P1-35: White; Asian; Some other race') ('P0010036' 'P1-36: White; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010037' 'P1-37: Black or African American; American Indian and Alaska Native; Asian') ('P0010038' 'P1-38: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0010039' 'P1-39: Black or African American; American Indian and Alaska Native; Some other race') ('P0010040' 'P1-40: Black or African American; Asian; Native Hawaiian and Other Pacific Islander') ('P0010041' 'P1-41: Black or African American; Asian; Some other race') ('P0010042' 'P1-42: Black or African American; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010043' 'P1-43: American Indian and Alaska 
Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0010044' 'P1-44: American Indian and Alaska Native; Asian; Some other race') ('P0010045' 'P1-45: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010046' 'P1-46: Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010047' 'P1-47: Population of four races') ('P0010048' 'P1-48: White; Black or African American; American Indian and Alaska Native; Asian') ('P0010049' 'P1-49: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0010050' 'P1-50: White; Black or African American; American Indian and Alaska Native; Some other race') ('P0010051' 'P1-51: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander') ('P0010052' 'P1-52: White; Black or African American; Asian; Some other race') ('P0010053' 'P1-53: White; Black or African American; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010054' 'P1-54: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0010055' 'P1-55: White; American Indian and Alaska Native; Asian; Some other race') ('P0010056' 'P1-56: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010057' 'P1-57: White; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010058' 'P1-58: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0010059' 'P1-59: Black or African American; American Indian and Alaska Native; Asian; Some other race') ('P0010060' 'P1-60: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010061' 'P1-61: Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010062' 'P1-62: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010063' 'P1-63: Population of five races') ('P0010064' 'P1-64: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0010065' 'P1-65: White; Black or African American; American Indian and Alaska Native; Asian; Some other race') ('P0010066' 'P1-66: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010067' 'P1-67: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010068' 'P1-68: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010069' 'P1-69: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0010070' 'P1-70: Population of six races') ('P0010071' 'P1-71: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race')))} 'p2':{'title':'Hispanic or Latino, and not Hispanic or Latino by Race' 'releases':['dec2010_pl94' 'dec2020_pl94'] 'columns':OrderedDict((('P0020001' 'P2-1: Total') ('P0020002' 'P2-2: Hispanic or Latino') ('P0020003' 'P2-3: Not Hispanic or Latino') ('P0020004' 'P2-4: Population of one race') ('P0020005' 'P2-5: White alone') ('P0020006' 'P2-6: Black or African American alone') ('P0020007' 'P2-7: American 
Indian and Alaska Native alone') ('P0020008' 'P2-8: Asian alone') ('P0020009' 'P2-9: Native Hawaiian and Other Pacific Islander alone') ('P0020010' 'P2-10: Some other race alone') ('P0020011' 'P2-11: Population of two or more races') ('P0020012' 'P2-12: Population of two races') ('P0020013' 'P2-13: White; Black or African American') ('P0020014' 'P2-14: White; American Indian and Alaska Native') ('P0020015' 'P2-15: White; Asian') ('P0020016' 'P2-16: White; Native Hawaiian and Other Pacific Islander') ('P0020017' 'P2-17: White; Some other race') ('P0020018' 'P2-18: Black or African American; American Indian and Alaska Native') ('P0020019' 'P2-19: Black or African American; Asian') ('P0020020' 'P2-20: Black or African American; Native Hawaiian and Other Pacific Islander') ('P0020021' 'P2-21: Black or African American; Some other race') ('P0020022' 'P2-22: American Indian and Alaska Native; Asian') ('P0020023' 'P2-23: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0020024' 'P2-24: American Indian and Alaska Native; Some other race') ('P0020025' 'P2-25: Asian; Native Hawaiian and Other Pacific Islander') ('P0020026' 'P2-26: Asian; Some other race') ('P0020027' 'P2-27: Native Hawaiian and Other Pacific Islander; Some other race') ('P0020028' 'P2-28: Population of three races') ('P0020029' 'P2-29: White; Black or African American; American Indian and Alaska Native') ('P0020030' 'P2-30: White; Black or African American; Asian') ('P0020031' 'P2-31: White; Black or African American; Native Hawaiian and Other Pacific Islander') ('P0020032' 'P2-32: White; Black or African American; Some other race') ('P0020033' 'P2-33: White; American Indian and Alaska Native; Asian') ('P0020034' 'P2-34: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0020035' 'P2-35: White; American Indian and Alaska Native; Some other race') ('P0020036' 'P2-36: White; Asian; Native Hawaiian and Other Pacific Islander') ('P0020037' 'P2-37: White; Asian; Some other race') ('P0020038' 'P2-38: White; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020039' 'P2-39: Black or African American; American Indian and Alaska Native; Asian') ('P0020040' 'P2-40: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0020041' 'P2-41: Black or African American; American Indian and Alaska Native; Some other race') ('P0020042' 'P2-42: Black or African American; Asian; Native Hawaiian and Other Pacific Islander') ('P0020043' 'P2-43: Black or African American; Asian; Some other race') ('P0020044' 'P2-44: Black or African American; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020045' 'P2-45: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0020046' 'P2-46: American Indian and Alaska Native; Asian; Some other race') ('P0020047' 'P2-47: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020048' 'P2-48: Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020049' 'P2-49: Population of four races') ('P0020050' 'P2-50: White; Black or African American; American Indian and Alaska Native; Asian') ('P0020051' 'P2-51: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0020052' 'P2-52: White; Black or African American; American Indian and Alaska Native; Some other race') ('P0020053' 'P2-53: White; Black or African 
American; Asian; Native Hawaiian and Other Pacific Islander') ('P0020054' 'P2-54: White; Black or African American; Asian; Some other race') ('P0020055' 'P2-55: White; Black or African American; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020056' 'P2-56: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0020057' 'P2-57: White; American Indian and Alaska Native; Asian; Some other race') ('P0020058' 'P2-58: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020059' 'P2-59: White; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020060' 'P2-60: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0020061' 'P2-61: Black or African American; American Indian and Alaska Native; Asian; Some other race') ('P0020062' 'P2-62: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020063' 'P2-63: Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020064' 'P2-64: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020065' 'P2-65: Population of five races') ('P0020066' 'P2-66: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0020067' 'P2-67: White; Black or African American; American Indian and Alaska Native; Asian; Some other race') ('P0020068' 'P2-68: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020069' 'P2-69: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020070' 'P2-70: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020071' 'P2-71: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0020072' 'P2-72: Population of six races') ('P0020073' 'P2-73: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race')))} 'p3':{'title':'Race for the Population 18 Years and Over' 'releases':['dec2010_pl94' 'dec2020_pl94'] 'columns':OrderedDict((('P0030001' 'P3-1: Total') ('P0030002' 'P3-2: Population of one race') ('P0030003' 'P3-3: White alone') ('P0030004' 'P3-4: Black or African American alone') ('P0030005' 'P3-5: American Indian and Alaska Native alone') ('P0030006' 'P3-6: Asian alone') ('P0030007' 'P3-7: Native Hawaiian and Other Pacific Islander alone') ('P0030008' 'P3-8: Some other race alone') ('P0030009' 'P3-9: Population of two or more races') ('P0030010' 'P3-10: Population of two races') ('P0030011' 'P3-11: White; Black or African American') ('P0030012' 'P3-12: White; American Indian and Alaska Native') ('P0030013' 'P3-13: White; Asian') ('P0030014' 'P3-14: White; Native Hawaiian and Other Pacific Islander') ('P0030015' 'P3-15: White; Some other race') ('P0030016' 'P3-16: Black or African American; American Indian and Alaska Native') ('P0030017' 'P3-17: Black or African American; Asian') ('P0030018' 'P3-18: Black or African American; Native Hawaiian and Other Pacific Islander') ('P0030019' 'P3-19: Black or African American; Some other race') ('P0030020' 'P3-20: American 
Indian and Alaska Native; Asian') ('P0030021' 'P3-21: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0030022' 'P3-22: American Indian and Alaska Native; Some other race') ('P0030023' 'P3-23: Asian; Native Hawaiian and Other Pacific Islander') ('P0030024' 'P3-24: Asian; Some other race') ('P0030025' 'P3-25: Native Hawaiian and Other Pacific Islander; Some other race') ('P0030026' 'P3-26: Population of three races') ('P0030027' 'P3-27: White; Black or African American; American Indian and Alaska Native') ('P0030028' 'P3-28: White; Black or African American; Asian') ('P0030029' 'P3-29: White; Black or African American; Native Hawaiian and Other Pacific Islander') ('P0030030' 'P3-30: White; Black or African American; Some other race') ('P0030031' 'P3-31: White; American Indian and Alaska Native; Asian') ('P0030032' 'P3-32: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0030033' 'P3-33: White; American Indian and Alaska Native; Some other race') ('P0030034' 'P3-34: White; Asian; Native Hawaiian and Other Pacific Islander') ('P0030035' 'P3-35: White; Asian; Some other race') ('P0030036' 'P3-36: White; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030037' 'P3-37: Black or African American; American Indian and Alaska Native; Asian') ('P0030038' 'P3-38: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0030039' 'P3-39: Black or African American; American Indian and Alaska Native; Some other race') ('P0030040' 'P3-40: Black or African American; Asian; Native Hawaiian and Other Pacific Islander') ('P0030041' 'P3-41: Black or African American; Asian; Some other race') ('P0030042' 'P3-42: Black or African American; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030043' 'P3-43: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0030044' 'P3-44: American Indian and Alaska Native; Asian; Some other race') ('P0030045' 'P3-45: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030046' 'P3-46: Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030047' 'P3-47: Population of four races') ('P0030048' 'P3-48: White; Black or African American; American Indian and Alaska Native; Asian') ('P0030049' 'P3-49: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0030050' 'P3-50: White; Black or African American; American Indian and Alaska Native; Some other race') ('P0030051' 'P3-51: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander') ('P0030052' 'P3-52: White; Black or African American; Asian; Some other race') ('P0030053' 'P3-53: White; Black or African American; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030054' 'P3-54: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0030055' 'P3-55: White; American Indian and Alaska Native; Asian; Some other race') ('P0030056' 'P3-56: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030057' 'P3-57: White; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030058' 'P3-58: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0030059' 'P3-59: Black or African American; American Indian and 
Alaska Native; Asian; Some other race') ('P0030060' 'P3-60: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030061' 'P3-61: Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030062' 'P3-62: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030063' 'P3-63: Population of five races') ('P0030064' 'P3-64: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0030065' 'P3-65: White; Black or African American; American Indian and Alaska Native; Asian; Some other race') ('P0030066' 'P3-66: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030067' 'P3-67: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030068' 'P3-68: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030069' 'P3-69: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0030070' 'P3-70: Population of six races') ('P0030071' 'P3-71: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race')))} 'p4':{'title':'Hispanic or Latino, and not Hispanic or Latino by Race for the Population 18 Years and Over' 'releases':['dec2010_pl94' 'dec2020_pl94'] 'columns':OrderedDict((('P0040001' 'P4-1: Total') ('P0040002' 'P4-2: Hispanic or Latino') ('P0040003' 'P4-3: Not Hispanic or Latino') ('P0040004' 'P4-4: Population of one race') ('P0040005' 'P4-5: White alone') ('P0040006' 'P4-6: Black or African American alone') ('P0040007' 'P4-7: American Indian and Alaska Native alone') ('P0040008' 'P4-8: Asian alone') ('P0040009' 'P4-9: Native Hawaiian and Other Pacific Islander alone') ('P0040010' 'P4-10: Some other race alone') ('P0040011' 'P4-11: Population of two or more races') ('P0040012' 'P4-12: Population of two races') ('P0040013' 'P4-13: White; Black or African American') ('P0040014' 'P4-14: White; American Indian and Alaska Native') ('P0040015' 'P4-15: White; Asian') ('P0040016' 'P4-16: White; Native Hawaiian and Other Pacific Islander') ('P0040017' 'P4-17: White; Some other race') ('P0040018' 'P4-18: Black or African American; American Indian and Alaska Native') ('P0040019' 'P4-19: Black or African American; Asian') ('P0040020' 'P4-20: Black or African American; Native Hawaiian and Other Pacific Islander') ('P0040021' 'P4-21: Black or African American; Some other race') ('P0040022' 'P4-22: American Indian and Alaska Native; Asian') ('P0040023' 'P4-23: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0040024' 'P4-24: American Indian and Alaska Native; Some other race') ('P0040025' 'P4-25: Asian; Native Hawaiian and Other Pacific Islander') ('P0040026' 'P4-26: Asian; Some other race') ('P0040027' 'P4-27: Native Hawaiian and Other Pacific Islander; Some other race') ('P0040028' 'P4-28: Population of three races') ('P0040029' 'P4-29: White; Black or African American; American Indian and Alaska Native') ('P0040030' 'P4-30: White; Black or African American; Asian') ('P0040031' 'P4-31: White; Black or African American; Native Hawaiian and Other Pacific Islander') ('P0040032' 'P4-32: White; Black or African 
American; Some other race') ('P0040033' 'P4-33: White; American Indian and Alaska Native; Asian') ('P0040034' 'P4-34: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0040035' 'P4-35: White; American Indian and Alaska Native; Some other race') ('P0040036' 'P4-36: White; Asian; Native Hawaiian and Other Pacific Islander') ('P0040037' 'P4-37: White; Asian; Some other race') ('P0040038' 'P4-38: White; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040039' 'P4-39: Black or African American; American Indian and Alaska Native; Asian') ('P0040040' 'P4-40: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0040041' 'P4-41: Black or African American; American Indian and Alaska Native; Some other race') ('P0040042' 'P4-42: Black or African American; Asian; Native Hawaiian and Other Pacific Islander') ('P0040043' 'P4-43: Black or African American; Asian; Some other race') ('P0040044' 'P4-44: Black or African American; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040045' 'P4-45: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0040046' 'P4-46: American Indian and Alaska Native; Asian; Some other race') ('P0040047' 'P4-47: American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040048' 'P4-48: Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040049' 'P4-49: Population of four races') ('P0040050' 'P4-50: White; Black or African American; American Indian and Alaska Native; Asian') ('P0040051' 'P4-51: White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander') ('P0040052' 'P4-52: White; Black or African American; American Indian and Alaska Native; Some other race') ('P0040053' 'P4-53: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander') ('P0040054' 'P4-54: White; Black or African American; Asian; Some other race') ('P0040055' 'P4-55: White; Black or African American; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040056' 'P4-56: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0040057' 'P4-57: White; American Indian and Alaska Native; Asian; Some other race') ('P0040058' 'P4-58: White; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040059' 'P4-59: White; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040060' 'P4-60: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0040061' 'P4-61: Black or African American; American Indian and Alaska Native; Asian; Some other race') ('P0040062' 'P4-62: Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040063' 'P4-63: Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040064' 'P4-64: American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040065' 'P4-65: Population of five races') ('P0040066' 'P4-66: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander') ('P0040067' 'P4-67: White; Black or African American; American Indian and Alaska Native; Asian; Some other race') ('P0040068' 'P4-68: 
White; Black or African American; American Indian and Alaska Native; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040069' 'P4-69: White; Black or African American; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040070' 'P4-70: White; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040071' 'P4-71: Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race') ('P0040072' 'P4-72: Population of six races') ('P0040073' 'P4-73: White; Black or African American; American Indian and Alaska Native; Asian; Native Hawaiian and Other Pacific Islander; Some other race'))) } 'p5':{'title':'Group Quarters Population by Major Group Quarters Type' 'releases':['dec2020_pl94'] 'columns':OrderedDict((('P0050001' 'Total:') ('P0050002' 'Institutionalized population:') ('P0050003' 'Correctional facilities for adults') ('P0050004' 'Juvenile facilities') ('P0050005' 'Nursing facilities/Skilled-nursing facilities') ('P0050006' 'Other institutional facilities') ('P0050007' 'Noninstitutionalized population:') ('P0050008' 'College/University student housing') ('P0050009' 'Military quarters') ('P0050010' 'Other noninstitutional facilities') ))} 'h1':{'title':'Occupancy Status' 'releases':['dec2010_pl94' 'dec2020_pl94'] 'columns':OrderedDict((('H0010001' 'H1-1: Total') ('H0010002' 'H1-2: Occupied') ('H0010003' 'H1-3: Vacant'))) }}<line_sep>
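# A minimal, hypothetical usage sketch of dataframe_to_feature_collection defined above;
# the demo frame and point geometry are made-up illustration data, not part of the upload
# pipeline, and the guard keeps the demo out of normal imports.
<if_stmt>__name__<eq>'__main__'<block_start>demo_df=pd.DataFrame([{'name':'Demo area' 'original_id':'demo-1' 'geom':'{"type": "Point", "coordinates": [-87.6, 41.8]}'}])<line_sep>print(json.dumps(dataframe_to_feature_collection(demo_df 'geom') indent=2))<block_end>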
|
<import_from_stmt>PIL Image<import_from_stmt>os listdir<def_stmt>averageColor filename<block_start>image=Image.open(filename).convert('RGB')<line_sep>r,g,b=0. 0. 0.<line_sep>pixels=image.size[0]<times>image.size[1]<for_stmt>x range(image.size[0])<block_start><for_stmt>y range(image.size[1])<block_start>rgb=image.getpixel((x y))<line_sep>r<augadd>rgb[0]<line_sep>g<augadd>rgb[1]<line_sep>b<augadd>rgb[2]<block_end><block_end>image.close()<line_sep><return>int(round(r/pixels)) int(round(g/pixels)) int(round(b/pixels))<block_end>print("colorDictionary={")<for_stmt>f listdir('assets/minecraft/textures/blocks')<block_start><if_stmt>f.lower().endswith(".png")<block_start>print(" '"+f[:-4]+"': "+str(averageColor('assets/minecraft/textures/blocks/'+f))+",")<block_end><block_end>print("}")<line_sep>
|
"""
==========
Polar Demo
==========
Demo of a line plot on a polar axis.
"""<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<line_sep>r=np.arange(0 2 0.01)<line_sep>theta=2<times>np.pi<times>r<line_sep>ax=plt.subplot(111 projection='polar')<line_sep>ax.plot(theta r)<line_sep>ax.set_rmax(2)<line_sep>ax.set_rticks([0.5 1 1.5 2])# Less radial ticks
ax.set_rlabel_position(-22.5)# Move radial labels away from plotted line
ax.grid(<true>)<line_sep>ax.set_title("A line plot on a polar axis" va='bottom')<line_sep>plt.show()<line_sep>#############################################################################
#
# ------------
#
# References
# """"""""""
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
<import_stmt>matplotlib<line_sep>matplotlib.axes.Axes.plot<line_sep>matplotlib.projections.polar<line_sep>matplotlib.projections.polar.PolarAxes<line_sep>matplotlib.projections.polar.PolarAxes.set_rticks<line_sep>matplotlib.projections.polar.PolarAxes.set_rmax<line_sep>matplotlib.projections.polar.PolarAxes.set_rlabel_position<line_sep>
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
<import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401
<import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>VaultUsage(object)<block_start>"""
VaultUsage model.
"""<def_stmt>__init__ self **kwargs<block_start>"""
Initializes a new VaultUsage object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param key_count:
The value to assign to the key_count property of this VaultUsage.
:type key_count: int
:param key_version_count:
The value to assign to the key_version_count property of this VaultUsage.
:type key_version_count: int
:param software_key_count:
The value to assign to the software_key_count property of this VaultUsage.
:type software_key_count: int
:param software_key_version_count:
The value to assign to the software_key_version_count property of this VaultUsage.
:type software_key_version_count: int
"""<line_sep>self.swagger_types={'key_count':'int' 'key_version_count':'int' 'software_key_count':'int' 'software_key_version_count':'int'}<line_sep>self.attribute_map={'key_count':'keyCount' 'key_version_count':'keyVersionCount' 'software_key_count':'softwareKeyCount' 'software_key_version_count':'softwareKeyVersionCount'}<line_sep>self._key_count=<none><line_sep>self._key_version_count=<none><line_sep>self._software_key_count=<none><line_sep>self._software_key_version_count=<none><block_end>@property<def_stmt>key_count self<block_start>"""
**[Required]** Gets the key_count of this VaultUsage.
The number of keys in this vault that persist on a hardware security module (HSM), across all compartments, excluding keys in a `DELETED` state.
:return: The key_count of this VaultUsage.
:rtype: int
"""<line_sep><return>self._key_count<block_end>@key_count.setter<def_stmt>key_count self key_count<block_start>"""
Sets the key_count of this VaultUsage.
The number of keys in this vault that persist on a hardware security module (HSM), across all compartments, excluding keys in a `DELETED` state.
:param key_count: The key_count of this VaultUsage.
:type: int
"""<line_sep>self._key_count=key_count<block_end>@property<def_stmt>key_version_count self<block_start>"""
**[Required]** Gets the key_version_count of this VaultUsage.
The number of key versions in this vault that persist on a hardware security module (HSM), across all compartments, excluding key versions in a `DELETED` state.
:return: The key_version_count of this VaultUsage.
:rtype: int
"""<line_sep><return>self._key_version_count<block_end>@key_version_count.setter<def_stmt>key_version_count self key_version_count<block_start>"""
Sets the key_version_count of this VaultUsage.
The number of key versions in this vault that persist on a hardware security module (HSM), across all compartments, excluding key versions in a `DELETED` state.
:param key_version_count: The key_version_count of this VaultUsage.
:type: int
"""<line_sep>self._key_version_count=key_version_count<block_end>@property<def_stmt>software_key_count self<block_start>"""
Gets the software_key_count of this VaultUsage.
The number of keys in this vault that persist on the server, across all compartments, excluding keys in a `DELETED` state.
:return: The software_key_count of this VaultUsage.
:rtype: int
"""<line_sep><return>self._software_key_count<block_end>@software_key_count.setter<def_stmt>software_key_count self software_key_count<block_start>"""
Sets the software_key_count of this VaultUsage.
The number of keys in this vault that persist on the server, across all compartments, excluding keys in a `DELETED` state.
:param software_key_count: The software_key_count of this VaultUsage.
:type: int
"""<line_sep>self._software_key_count=software_key_count<block_end>@property<def_stmt>software_key_version_count self<block_start>"""
Gets the software_key_version_count of this VaultUsage.
The number of key versions in this vault that persist on the server, across all compartments, excluding key versions in a `DELETED` state.
:return: The software_key_version_count of this VaultUsage.
:rtype: int
"""<line_sep><return>self._software_key_version_count<block_end>@software_key_version_count.setter<def_stmt>software_key_version_count self software_key_version_count<block_start>"""
Sets the software_key_version_count of this VaultUsage.
The number of key versions in this vault that persist on the server, across all compartments, excluding key versions in a `DELETED` state.
:param software_key_version_count: The software_key_version_count of this VaultUsage.
:type: int
"""<line_sep>self._software_key_version_count=software_key_version_count<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end>
|
<import_from_stmt>common *# NOQA
<import_stmt>json<line_sep>@pytest.mark.nonparallel<def_stmt>test_ha_config admin_user_client<block_start>ha_config=find_one(admin_user_client.list_ha_config)<line_sep>admin_user_client.update(ha_config enabled=<false>)<line_sep>ha_config=find_one(admin_user_client.list_ha_config)<assert_stmt><not>ha_config.enabled<line_sep>admin_user_client.update(ha_config enabled=<true>)<line_sep>ha_config=find_one(admin_user_client.list_ha_config)<assert_stmt>ha_config.enabled<line_sep>admin_user_client.update(ha_config enabled=<false>)<line_sep>ha_config=find_one(admin_user_client.list_ha_config)<assert_stmt><not>ha_config.enabled<assert_stmt>ha_config.dbHost<in>['localhost' '127.0.0.1']<assert_stmt>ha_config.dbSize<g>0<block_end><def_stmt>test_ha_config_script admin_user_client<block_start>ha_config=find_one(admin_user_client.list_ha_config)<line_sep>create_url=ha_config.actions['createscript']<line_sep>r=requests.post(create_url data=json.dumps({'clusterSize':5 'httpPort':1234 'httpsPort':1235 'redisPort':6375 'zookeeperQuorumPort':6375 'zookeeperLeaderPort':6375 'zookeeperClientPort':6375 'cert':'cert' 'certChain':'certChain' 'key':'key' 'hostRegistrationUrl':'https://....' 'swarmEnabled':<false> 'httpEnabled':<false> }))<assert_stmt>r.text<is><not><none><assert_stmt>r.status_code<eq>200<def_stmt>check <block_start>ha_config=find_one(admin_user_client.list_ha_config)<line_sep><return>ha_config.clusterSize<eq>5<block_end>wait_for(check)<block_end>@pytest.mark.nonparallel<def_stmt>test_ha_config_dbdump admin_user_client<block_start>ha_config=find_one(admin_user_client.list_ha_config)<line_sep>dump=ha_config.links['dbdump']<line_sep>r=requests.get(dump)<assert_stmt>r.text<is><not><none><assert_stmt>r.status_code<eq>200<block_end>
|
<import_stmt>pytest<import_stmt>testinfra<line_sep>check_output=testinfra.get_host('local://').check_output<class_stmt>CommandLineArguments<block_start><def_stmt>__init__ self docker_image<block_start>self.docker_image=docker_image<block_end><block_end>@pytest.fixture()<def_stmt>host request<block_start>arguments=_parse_command_line_arguments(request)<line_sep>image_id=arguments.docker_image<or>check_output('docker build -q %s' request.param)<line_sep>container_id=check_output('docker run -d --entrypoint tail %s -f /dev/null' image_id)<def_stmt>teardown <block_start>check_output('docker rm -f %s' container_id)<block_end>request.addfinalizer(teardown)<line_sep><return>testinfra.get_host('docker://'+container_id)<block_end><def_stmt>_parse_command_line_arguments request<block_start>option_docker_image=request.config.getoption('--docker-image')<line_sep><return>CommandLineArguments(docker_image=option_docker_image)<block_end><def_stmt>pytest_addoption parser<block_start>parser.addoption('--docker-image' action='store' type='string' help='Prebuilt Docker image to run the infrastructure tests against; if omitted, an image is built from the path given in the docker marker' required=<false>)<block_end><def_stmt>pytest_generate_tests metafunc<block_start><if_stmt>'host'<in>metafunc.fixturenames<block_start>marker=metafunc.definition.get_closest_marker('docker')<if_stmt>marker<is><none><block_start><raise>Exception('docker marker is required for infrastructure tests')<block_end>path=marker.kwargs.get('path')<if_stmt>path<is><none><block_start>path='.'<block_end>metafunc.parametrize('host' [path] indirect=<true> scope='module')<block_end><block_end>
|
<import_stmt>copy<import_stmt>json<import_from_stmt>elastalert.kibana add_filter<import_from_stmt>elastalert.kibana dashboard_temp<import_from_stmt>elastalert.kibana filters_from_dashboard<import_from_stmt>elastalert.kibana kibana4_dashboard_link<import_from_stmt>elastalert.util EAException<line_sep># Dashboard schema with only filters section
test_dashboard='''{
"title": "AD Lock Outs",
"services": {
"filter": {
"list": {
"0": {
"type": "time",
"field": "@timestamp",
"from": "now-7d",
"to": "now",
"mandate": "must",
"active": true,
"alias": "",
"id": 0
},
"1": {
"type": "field",
"field": "_log_type",
"query": "\\"active_directory\\"",
"mandate": "must",
"active": true,
"alias": "",
"id": 1
},
"2": {
"type": "querystring",
"query": "ad.security_auditing_code:4740",
"mandate": "must",
"active": true,
"alias": "",
"id": 2
},
"3": {
"type": "range",
"field": "@timestamp",
"mandate": "must",
"active": true,
"alias": "",
"from": "2014-09-27T12:34:45Z",
"to": "2014-09-26T12:34:45Z",
"id": 3
},
"4": {
"field": "@timestamp",
"alias": "",
"mandate": "mustNot",
"active": true,
"query": "that",
"type": "field",
"id": 4
},
"5": {
"field": "@timestamp",
"alias": "",
"mandate": "either",
"active": true,
"query": "that",
"type": "field",
"id": 5
}
},
"ids": [
0,
1,
2,
3,
4,
5
]
}
}
}'''<line_sep>test_dashboard=json.loads(test_dashboard)<line_sep>test_dashboard2='''{
"title": "AD Lock Outs",
"services": {
"filter": {
"list": {
"0": {
"type": "time",
"field": "@timestamp",
"from": "now-7d",
"to": "now",
"mandate": "must",
"active": true,
"alias": "",
"id": 0
},
"1": {
"type": "field",
"field": "_log_type",
"query": "\\"active_directory\\"",
"mandate": "must",
"active": true,
"alias": "",
"id": 1
}
},
"ids": [
0,
1
]
}
}
}'''<line_sep>test_dashboard2=json.loads(test_dashboard2)<def_stmt>test_filters_from_dashboard <block_start>filters=filters_from_dashboard(test_dashboard)<assert_stmt>{'term':{'_log_type':'"active_directory"'}}<in>filters<assert_stmt>{'query':{'query_string':{'query':'ad.security_auditing_code:4740'}}}<in>filters<assert_stmt>{'range':{'@timestamp':{'from':'2014-09-27T12:34:45Z' 'to':'2014-09-26T12:34:45Z'}}}<in>filters<assert_stmt>{'not':{'term':{'@timestamp':'that'}}}<in>filters<assert_stmt>{'or':[{'term':{'@timestamp':'that'}}]}<in>filters<block_end><def_stmt>test_filters_from_dashboard2 <block_start>filters=filters_from_dashboard(test_dashboard2)<assert_stmt>{'term':{'_log_type':'"active_directory"'}}<in>filters<block_end><def_stmt>test_add_filter <block_start>basic_filter={"term":{"this":"that"}}<line_sep>db=copy.deepcopy(dashboard_temp)<line_sep>add_filter(db basic_filter)<assert_stmt>db['services']['filter']['list']['1']<eq>{'field':'this' 'alias':'' 'mandate':'must' 'active':<true> 'query':'"that"' 'type':'field' 'id':1}<line_sep>list_filter={"term":{"this":["that" "those"]}}<line_sep>db=copy.deepcopy(dashboard_temp)<line_sep>add_filter(db list_filter)<assert_stmt>db['services']['filter']['list']['1']<eq>{'field':'this' 'alias':'' 'mandate':'must' 'active':<true> 'query':'("that" AND "those")' 'type':'field' 'id':1}<line_sep>not_filter={'not':{'term':{'this':'that'}}}<line_sep>db=copy.deepcopy(dashboard_temp)<line_sep>add_filter(db not_filter)<assert_stmt>db['services']['filter']['list']['1']<eq>{'field':'this' 'alias':'' 'mandate':'mustNot' 'active':<true> 'query':'"that"' 'type':'field' 'id':1}<line_sep>START_TIMESTAMP='2014-09-26T12:34:45Z'<line_sep>END_TIMESTAMP='2014-09-27T12:34:45Z'<line_sep>range_filter={'range':{'@timestamp':{'lte':END_TIMESTAMP 'gt':START_TIMESTAMP}}}<line_sep>db=copy.deepcopy(dashboard_temp)<line_sep>add_filter(db range_filter)<assert_stmt>db['services']['filter']['list']['1']<eq>{'field':'@timestamp' 'alias':'' 'mandate':'must' 'active':<true> 'lte':'2014-09-27T12:34:45Z' 'gt':'2014-09-26T12:34:45Z' 'type':'range' 'id':1}<line_sep>query_filter={'query':{'wildcard':'this*that'}}<line_sep>db=copy.deepcopy(dashboard_temp)<line_sep>add_filter(db query_filter)<assert_stmt>db['services']['filter']['list']['1']<eq>{'alias':'' 'mandate':'must' 'active':<true> 'id':1}<line_sep>query_string_filter={'query':{'query_string':{'query':'ad.security_auditing_code:4740'}}}<line_sep>db=copy.deepcopy(dashboard_temp)<line_sep>add_filter(db query_string_filter)<assert_stmt>db['services']['filter']['list']['1']<eq>{'alias':'' 'mandate':'must' 'active':<true> 'query':'ad.security_auditing_code:4740' 'type':'querystring' 'id':1}<try_stmt><block_start>error_filter={'bool':{'must':[{'range':{'@timestamp':{'lte':END_TIMESTAMP 'gt':START_TIMESTAMP}}}]}}<line_sep>db=copy.deepcopy(dashboard_temp)<line_sep>add_filter(db error_filter)<block_end><except_stmt>EAException<as>ea<block_start>excepted="Could not parse filter {'bool': {'must': [{'range': {'@timestamp': "<line_sep>excepted<augadd>"{'lte': '2014-09-27T12:34:45Z', 'gt': '2014-09-26T12:34:45Z'}}}]}} for Kibana"<assert_stmt>excepted<eq>str(ea)<block_end><block_end><def_stmt>test_url_encoded <block_start>url=kibana4_dashboard_link('example.com/#/Dashboard' '2015-01-01T00:00:00Z' '2017-01-01T00:00:00Z')<assert_stmt><not>any([special_char<in>url<for>special_char ["',\":;?&=()"]])<block_end><def_stmt>test_url_env_substitution environ<block_start>environ.update({'KIBANA_HOST':'kibana' 'KIBANA_PORT':'5601' 
})<line_sep>url=kibana4_dashboard_link('http://$KIBANA_HOST:$KIBANA_PORT/#/Dashboard' '2015-01-01T00:00:00Z' '2017-01-01T00:00:00Z' )<assert_stmt>url.startswith('http://kibana:5601/#/Dashboard')<block_end>
|
<import_from_stmt>collections defaultdict<import_stmt>datetime<import_stmt>log<import_from_stmt>mock patch<import_stmt>os<import_stmt>os.path<import_stmt>preferences<import_stmt>process<import_stmt>psutil<line_sep>#
# TODO: Fix tests, needs work on Auger's automatic test generator
#
<import_from_stmt>psutil Popen<import_stmt>sys<import_stmt>unittest<import_stmt>utils<import_stmt>versions.v00001.process<import_stmt>versions.v00001.suspender<import_from_stmt>versions.v00001.suspender defaultdict<import_stmt>versions.v00001.utils<import_from_stmt>versions.v00001.utils OnMainThread<class_stmt>LogTest(unittest.TestCase)<block_start>@patch.object(os.path 'join')@patch.object(os.path 'exists')<def_stmt>test_get_log_path self mock_exists mock_join<block_start>mock_exists.return_value=<true><line_sep>mock_join.return_value='/Users/chris/HappyMacApp/downloads/v00001'<line_sep>self.assertEqual(log.get_log_path() '/Users/chris/HappyMacApp/happymac_log.txt')<block_end><def_stmt>test_log self<block_start>self.assertEqual(log.log(message='Google process 44784 ()' error=<none>) <none>)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
|
<import_from_future_stmt> print_function<import_from_future_stmt> division<import_from_future_stmt> absolute_import<import_stmt>os<import_stmt>unittest<import_stmt>pymel.util.path<import_from_stmt>pymel.util.path path<class_stmt>TestPath(unittest.TestCase)<block_start><def_stmt>test_misc self<block_start>thisFile=path(__file__)<line_sep>self.assertTrue(thisFile.exists())<line_sep>self.assertTrue(thisFile.isfile())<line_sep>self.assertFalse(thisFile.isdir())<line_sep>self.assertIn(thisFile.basename() (path('test_util_path.py') path('test_util_path.pyc')))<line_sep>self.assertEqual(thisFile.namebase 'test_util_path')<line_sep>self.assertIn(thisFile.ext ('.py' '.pyc'))<line_sep>thisDir=thisFile.dirname()<line_sep>self.assertEqual(thisDir os.path.dirname(str(thisFile)))<line_sep>self.assertTrue(thisDir.exists())<line_sep>self.assertFalse(thisDir.isfile())<line_sep>self.assertTrue(thisDir.isdir())<line_sep>self.assertEqual(thisDir.basename() 'tests')<line_sep>self.assertEqual(thisDir.namebase 'tests')<line_sep>self.assertEqual(thisDir.ext '')<line_sep>files=thisDir.files()<line_sep>self.assertIn(thisFile files)<line_sep>noExist=path('slartybartfast_fasdfjlkfjl')<line_sep>self.assertFalse(noExist.exists())<line_sep>self.assertFalse(noExist.isfile())<line_sep>self.assertFalse(noExist.isdir())<block_end><block_end>
|
# coding: utf-8
<import_stmt>unittest<import_from_stmt>data_structures.trees.trie Trie<class_stmt>TrieNodeTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.trie=Trie()<block_end><def_stmt>test__len__ self<block_start>self.assertEqual(len(self.trie) 0)<block_end><def_stmt>test_insert self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>self.trie.insert('')<block_end><block_end><def_stmt>test_search self<block_start>self.assertEqual(self.trie.search('') <false>)<block_end><def_stmt>test_startsWith self<block_start>self.assertEqual(self.trie.startsWith('') <true>)<block_end><def_stmt>test_integration self<block_start>self.trie.insert('apple')<line_sep>self.assertEqual(len(self.trie) 1)<line_sep>self.assertEqual(self.trie.search('apple') <true>)<line_sep>self.assertEqual(self.trie.search('app') <false>)<line_sep>self.assertEqual(self.trie.startsWith('app') <true>)<line_sep>self.trie.insert('app')<line_sep>self.assertEqual(len(self.trie) 2)<line_sep>self.assertEqual(self.trie.search('app') <true>)<line_sep>self.assertEqual(self.trie.startsWith('app') <true>)<line_sep>self.trie.insert('hammer')<line_sep>self.assertEqual(len(self.trie) 3)<line_sep>self.assertEqual(self.trie.search('hammers') <false>)<line_sep>self.assertEqual(self.trie.startsWith('hammers') <false>)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
|
<import_stmt>sys<import_stmt>os.path<line_sep>sys.path.append(os.path.abspath(os.path.join(os.path.dirname(sys.modules[__name__].__file__) "..")))<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_from_stmt>sklearn.neighbors KernelDensity<import_from_stmt>tensorflow.python.keras.datasets mnist<import_from_stmt>data.data_handler ProcessedNNHandler<import_from_stmt>definitions DATA_PATH<import_from_stmt>evaluation.create_plot save_plot<def_stmt>configure_plt <block_start>plt.rc('font' size=14)<line_sep>plt.rc('axes' titlesize=14)<line_sep>plt.rc('axes' labelsize=14)<line_sep>plt.rc('xtick' labelsize=14)<line_sep>plt.rc('ytick' labelsize=14)<line_sep>plt.rc('legend' fontsize=14)<line_sep>plt.rc('figure' titlesize=14)<block_end><def_stmt>plot_mnist_samples width:int=6 height:int=2<block_start>(x_train y_train),(_ _)=mnist.load_data()<line_sep>fig,axs=plt.subplots(height width figsize=(width height))<for_stmt>i range(height)<block_start><for_stmt>j range(width)<block_start>first_image=x_train[j+width<times>i+120]<line_sep>first_image=np.array(first_image dtype='float')<line_sep>pixels=first_image.reshape((28 28))<line_sep>axs[i j].imshow(pixels cmap='gray')<block_end><block_end><for_stmt>ax axs.flat<block_start>ax.label_outer()<block_end>plt.subplots_adjust(wspace=0.2 hspace=0.2)<block_end><def_stmt>plot_kernels <block_start>fig,ax=plt.subplots(figsize=(8 4) subplot_kw={'facecolor':'#F4F4F4' 'axisbelow':<true>})<line_sep>ax.grid(color='white' linestyle='-' linewidth=2)<for_stmt>spine ax.spines.values()<block_start>spine.set_color('#BBBBBB')<block_end>X_src=np.zeros((1 1))<line_sep>x_grid=np.linspace(-3 3 1000)<for_stmt>kernel ['gaussian' 'tophat' 'exponential' 'epanechnikov' 'linear' 'cosine']<block_start>log_dens=KernelDensity(kernel=kernel).fit(X_src).score_samples(x_grid[: <none>])<if_stmt>kernel<eq>'epanechnikov'<block_start>ax.plot(x_grid np.exp(log_dens) lw=6 alpha=0.8 label=kernel)<block_end><else_stmt><block_start>ax.plot(x_grid np.exp(log_dens) lw=3 alpha=0.5 label=kernel)<block_end><block_end>ax.set_ylim(0 1.05)<line_sep>ax.set_xlim(-2.9 2.9)<line_sep>ax.legend()<block_end><def_stmt>plot_histogram path:str<block_start>processed_nn:ProcessedNNHandler=ProcessedNNHandler(DATA_PATH+path)<line_sep>samples:np.array=processed_nn.get_all_samples()<line_sep>z_values:np.array=np.zeros(samples.shape[0])<for_stmt>i,sample enumerate(samples)<block_start>z_values[i]=sample[2]<block_end>z_values=z_values.reshape(-1 1)<line_sep>slots:int=50<line_sep>x_grid=np.linspace(-1.2 1.2 int(slots<times>1.2<times>4.0))<line_sep>fig,ax=plt.subplots()<for_stmt>bandwidth [0.05 0.18 0.5]<block_start>pdf=KernelDensity(kernel='epanechnikov' bandwidth=bandwidth).fit(z_values).score_samples(x_grid[: <none>])<line_sep>ax.plot(x_grid np.exp(pdf) linewidth=2 alpha=0.6 label='bandwidth=%.2f'%bandwidth)<block_end>ax.hist(z_values slots facecolor='gray' histtype='stepfilled' alpha=0.4 density=<true>)<line_sep>ax.legend(loc='upper right')<line_sep>ax.set_xlim(-1.2 1.2)<block_end>configure_plt()<line_sep>plot_mnist_samples()<line_sep>save_plot('mnist')<line_sep>plt.show()<line_sep>
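# The plot_kernels and plot_histogram helpers above are defined but never invoked by this
# script; a hypothetical way to exercise them (the processed-network filename is made up,
# and such a file must exist under DATA_PATH for plot_histogram to load anything):
# plot_kernels()
# save_plot('kde_kernels')
# plot_histogram('processed_network.npz')
# plt.show()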
|
<import_stmt>tempfile<import_from_stmt>nose.tools assert_raises<import_from_stmt>gffutils parser create feature iterators constants helpers exceptions <import_from_stmt>gffutils example_filename create_db<import_from_stmt>. attr_test_cases<import_from_stmt>textwrap dedent<line_sep>TEST_FILENAMES=[example_filename(i)<for>i ['c_elegans_WS199_ann_gff.txt' 'ensembl_gtf.txt' 'hybrid1.gff3' 'ncbi_gff3.txt' 'c_elegans_WS199_dna_shortened.fa' 'F3-unique-3.v2.gff' 'jgi_gff2.txt' 'wormbase_gff2_alt.txt' 'c_elegans_WS199_shortened_gff.txt' 'glimmer_nokeyval.gff3' 'mouse_extra_comma.gff3' 'wormbase_gff2.txt']]<def_stmt>test_directives <block_start>data=dedent("""
##directive1 example
. . . . . . . .
. . . . . . . .
. . . . . . . .
. . . . . . . .
""")<line_sep>it=iterators._StringIterator(data)<line_sep>db=create_db(data dbfn=':memory:' from_string=<true> verbose=<false>)<assert_stmt>it.directives<eq>db.directives<eq>['directive1 example'] (it.directives db.directives)<block_end><def_stmt>test_split_attrs # nosetests generator for all the test cases in attr_test_cases. (note no
# docstring for this test function so that nosetests -v will print the test
# cases)
<block_start><for_stmt>(attr_str attr_dict acceptable_reconstruction) attr_test_cases.attrs<block_start><yield>attrs_OK attr_str attr_dict acceptable_reconstruction<block_end><block_end><def_stmt>attrs_OK attr_str attr_dict acceptable_reconstruction=<none><block_start>"""
Given an attribute string and a dictionary of what you expect, test the
attribute splitting and reconstruction (invariant roundtrip).
There are some corner cases for the roundtrip invariance that don't work
(see attr_test_cases.py for details); `acceptable_reconstruction` handles
those.
"""<line_sep>result,dialect=parser._split_keyvals(attr_str)<assert_stmt>result<eq>attr_dict result<line_sep>reconstructed=parser._reconstruct(result dialect keep_order=<true>)<if_stmt>acceptable_reconstruction<block_start><assert_stmt>reconstructed<eq>acceptable_reconstruction reconstructed<block_end><else_stmt><block_start><assert_stmt>reconstructed<eq>attr_str reconstructed<block_end><block_end><def_stmt>parser_smoke_test <block_start>"""
Just confirm we can iterate completely through the test files....
"""<line_sep># Don't show the warnings for tests
<import_stmt>logging<line_sep>parser.logger.setLevel(logging.CRITICAL)<for_stmt>filename TEST_FILENAMES<block_start>p=iterators._FileIterator(filename)<for_stmt>i p<block_start><continue><block_end><block_end><block_end><def_stmt>test_empty_recontruct <block_start>"""
reconstructing attributes with incomplete information returns empty string
"""<assert_stmt>parser._reconstruct(<none> constants.dialect)<eq>""<line_sep>assert_raises(exceptions.AttributeStringError parser._reconstruct dict(ID='asdf') <none>)<line_sep>assert_raises(exceptions.AttributeStringError parser._reconstruct <none> <none>)<block_end><def_stmt>test_empty_split_keyvals <block_start>attrs,dialect=parser._split_keyvals(keyval_str=<none>)<assert_stmt>attrs<eq>feature.dict_class()<assert_stmt>dialect<eq>constants.dialect<block_end><def_stmt>test_repeated_keys_conflict <block_start>"""
if dialect says repeated keys, but len(vals) > 1, then the keys are not
actually repeated....
"""<line_sep>#
# This is now only checked when infer_dialect is True -- and is disabled
# when a dialect is provided
#
#dialect = constants.dialect.copy()
#dialect['repeated keys'] = True
#assert_raises(exceptions.AttributeStringError, parser._split_keyvals, "Parent=1,2,3", dialect)
<block_end><def_stmt>test_parser_from_string <block_start>"""
make sure from string and from file return identical results
"""<line_sep>line=b"chr2L FlyBase exon 7529 8116 . + . Name=CG11023:1;Parent=FBtr0300689,FBtr0300690"<line_sep>tmp=tempfile.NamedTemporaryFile()<line_sep>tmp.write(line)<line_sep>tmp.seek(0)<line_sep>p1=iterators._StringIterator("chr2L FlyBase exon 7529 8116 . + . Name=CG11023:1;Parent=FBtr0300689,FBtr0300690")<line_sep>p2=iterators._FileIterator(tmp.name)<line_sep>lines=list(zip(p1 p2))<assert_stmt>len(lines)<eq>1<assert_stmt>p1.current_item_number<eq>p2.current_item_number<eq>0<assert_stmt>lines[0][0]<eq>lines[0][1]<block_end><def_stmt>test_valid_line_count <block_start>p=iterators._FileIterator(example_filename('ncbi_gff3.txt'))<assert_stmt>len(list(p))<eq>17<line_sep>p=iterators._FileIterator(example_filename('hybrid1.gff3'))<assert_stmt>len(list(p))<eq>6<line_sep>p=iterators._FileIterator(example_filename('FBgn0031208.gff'))<assert_stmt>len(list(p))<eq>27<block_end><def_stmt>test_inconsistent_dialect <block_start>"""
The second feature does not have a trailing semicolon (wormbase_gff2_alt is
like this). But since the first feature does, that's what the db's dialect
is set to, which can cause errors when parsing attributes.
"""<line_sep>db=create.create_db("""
chr1 . gene 1 100 . + . gene_id "gene1";
chr1 . mRNA 1 100 . + . transcript_id "mRNA1"
""" ':memory:' from_string=<true>)<line_sep>items=list(db.all_features())<line_sep>print(items[0])<line_sep># before, was ['"mRNA1'] -- note extra "
<assert_stmt>items[1].attributes['transcript_id']<eq>['mRNA1'] items[1].attributes['transcript_id']<block_end><def_stmt>test_attributes <block_start>s="chr2L FlyBase mRNA 7529 9484 . + . ID=FBtr0300690;Name=CG11023-RC;Parent=FBgn0031208;"<line_sep>f=feature.feature_from_line(s)<line_sep>f.keep_order=<true><assert_stmt>str(f)<eq>s str(f)<block_end>
|
<import_from_stmt>.build_flow JavascriptEnhancementsBuildFlowCommand<import_from_stmt>.add_flow_definition JavascriptEnhancementsAddFlowDefinitionCommand<line_sep>__all__=["JavascriptEnhancementsBuildFlowCommand" "JavascriptEnhancementsAddFlowDefinitionCommand"]<line_sep>
|
<import_stmt>re<import_stmt>sys<import_stmt>os<line_sep>print('***********************************************************************')<line_sep>print('Let us check on that pyarrow version...')<line_sep>print('***********************************************************************')<line_sep>print()<line_sep>pyarrow_version=sys.modules["pyarrow"].__version__<line_sep>f=re.search("0.15.+" pyarrow_version)<if_stmt>(f<eq><none>)<block_start><for_stmt>key list(sys.modules.keys())<block_start><if_stmt>key.startswith("pyarrow")<block_start><del_stmt>sys.modules[key]<line_sep>print(f"unloaded pyarrow {pyarrow_version}")<block_end><block_end><import_stmt>pyarrow<line_sep>pyarrow_version=sys.modules['pyarrow'].__version__<line_sep>print(f"loaded pyarrow {pyarrow_version}")<line_sep>print(f"You're now running pyarrow {pyarrow_version} and are good to go!")<del_stmt>(pyarrow_version)<block_end><else_stmt><block_start>print(f"You're running pyarrow {pyarrow_version} and are good to go!")<block_end>
|
# We know that a^2 + b^2 = c^2, and wish to use this to compute c
<import_from_stmt>math sqrt hypot<line_sep>a=3e154# a^2 > 1e308
b=4e154# b^2 > 1e308
# with these, c = 5e154 which is less than 1e308
<def_stmt>longSideDirect <block_start><return>sqrt(a<power>2+b<power>2)<block_end># this will overflow
<def_stmt>longSideBuiltin <block_start><return>hypot(a b)<block_end># better to use built-in function
|
<import_from_stmt>pypy.interpreter.mixedmodule MixedModule<class_stmt>Module(MixedModule)<block_start>applevel_name='pytest'<line_sep>interpleveldefs={'raises':'interp_pytest.pypyraises' 'skip':'interp_pytest.pypyskip' 'fixture':'interp_pytest.fake_fixture' }<line_sep>appleveldefs={'importorskip':'app_pytest.importorskip' 'mark':'app_pytest.mark' }<block_end>
|
<import_stmt>gmaps<import_stmt>gmaps.datasets<line_sep>gmaps.configure(api_key="AI...")# Your Google API key
locations=gmaps.datasets.load_dataset("taxi_rides")<line_sep>fig=gmaps.figure()<line_sep># locations could be an array, a dataframe or just a Python iterable
fig.add_layer(gmaps.heatmap_layer(locations))<line_sep>fig<line_sep>
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
<import_from_stmt>torch.testing._internal.common_methods_invocations op_db<import_from_stmt>torch.testing._internal.common_device_type instantiate_device_type_tests ops <import_from_stmt>torch.testing._internal.common_utils TestCase run_tests<import_from_stmt>functorch_lagging_op_db functorch_lagging_op_db in_functorch_lagging_op_db <import_stmt>torch<class_stmt>TestFuncTorchLaggingOpDb(TestCase)<block_start><def_stmt>test_functorch_lagging_op_db_has_opinfos self device<block_start>self.assertEqual(len(functorch_lagging_op_db) len(op_db))<block_end>@ops(op_db allowed_dtypes=(torch.float ))<def_stmt>test_coverage self device dtype op<block_start><if_stmt>in_functorch_lagging_op_db(op)<block_start><return><block_end><raise>RuntimeError(f"{(op.name op.variant_test_name)} is in PyTorch's OpInfo db " "but is not in functorch's OpInfo db. Please regenerate " "test/functorch_lagging_op_db.py and add the new tests to " "denylists if necessary.")<block_end><block_end>instantiate_device_type_tests(TestFuncTorchLaggingOpDb globals() only_for=['cpu'])<if_stmt>__name__<eq>'__main__'<block_start>run_tests()<block_end>
|
"""The imagery client to connect to the camera job."""<import_from_stmt>typing Any Dict Sequence Text<import_stmt>gin<import_from_stmt>pybullet_envs.minitaur.fw_bridge worker_builder<import_from_stmt>pybullet_envs.minitaur.vision imagery_pb2<import_from_stmt>pybullet_envs.minitaur.vision imagery_utils<import_from_stmt>google3.third_party.fluxworks.core.fluxworks.python.genericutil_py fwassert<import_from_stmt>google3.third_party.fluxworks.core.fluxworks.python.genericutil_py timeutil<line_sep>_RPC_TIMEOUT=1<times>timeutil.TimeUtil.SEC<line_sep>_URI_START_CAPTURE="fwuri://VisionJob/StartCapture"<line_sep>_URI_STOP_CAPTURE="fwuri://VisionJob/StopCapture"<line_sep>_URI_GET_FRAME="fwuri://VisionJob/GetFrame"<line_sep>@gin.configurable<class_stmt>ImageryClient(object)<block_start>"""Sends commands and receives states from cameras."""<def_stmt>__init__ self fw_worker=<none> rpc_timeout_sec=_RPC_TIMEOUT ip_address=<none> port=<none> async_mode=<false> start_capture_uri:Text=_URI_START_CAPTURE stop_capture_uri:Text=_URI_STOP_CAPTURE get_frame_uri:Text=_URI_GET_FRAME <block_start>"""Initializes the client.
Args:
fw_worker: A FluxWorks worker instance.
rpc_timeout_sec: The timeout for any RPC calls from this client.
ip_address: The ip address of the camera/vision process. If the vision job
is instantiated in the same FluxWorks worker, neither the ip address nor
the port is needed.
port: The port of the camera/vision process.
async_mode: Whether the RPC calls in this client are async or synchronous.
Async mode is only required when you have multiple workers communicating
with each other in the same Python process. If worker A calls
worker B's RPC, worker B's RPC tries to acquire the GIL from its thread, but
the caller (worker A) already holds the GIL, which causes a deadlock
if worker A's calls are synchronous. If worker A calls its own RPC,
the same GIL can be reused, so there is no deadlock and no need
for async mode. Async mode requires context switching and is thus a
bit slower.
start_capture_uri: The FluxWorks URI to start camera capture.
stop_capture_uri: The FluxWorks URI to stop camera capture.
get_frame_uri: The FluxWorks URI to get camera frames.
"""<line_sep>self._rpc_timeout_sec=rpc_timeout_sec<if_stmt>fw_worker<is><none><block_start>fw_worker=worker_builder.GetDefaultWorker()<block_end>self._worker=fw_worker<line_sep># TODO(tingnan): Use a single address and split the string for FW.
<if_stmt>ip_address<is><not><none><block_start>self._worker.ConnectToWorker(ip_address port)<block_end>self._async_mode=async_mode<line_sep>self._start_capture_uri=start_capture_uri<line_sep>self._stop_capture_uri=stop_capture_uri<line_sep>self._get_frame_uri=get_frame_uri<block_end><def_stmt>_convert_camera_frame_to_image_dict self camera_frame:imagery_pb2.CameraFrame<block_start>"""Converts the camera frame to an image dictionary."""<line_sep># Each camera frame might contain multiple image channels, such as rgb and
# depth.
images={}<for_stmt>image_name,image_proto camera_frame.images.items()<block_start>image_array=imagery_utils.convert_image_to_array(image_proto)<line_sep>images[image_name]=image_array<block_end><return>images<block_end><def_stmt>start_capture self run_id:Text="vision"<block_start>"""Starts the camera capture session.
Args:
run_id: The capture session id. This id will determine the name of the
image logs' sub-directory.
"""<line_sep>capture_request=imagery_pb2.CaptureRequest()<line_sep>capture_request.run_id=run_id<line_sep>fwassert.FwAssert.CheckErrorMessage(self._worker.CallOnewayProtoRpc(self._start_capture_uri capture_request async_mode=self._async_mode))<block_end><def_stmt>stop_capture self<block_start>"""Concludes the current capture session."""<line_sep>capture_request=imagery_pb2.CaptureRequest()<line_sep>fwassert.FwAssert.CheckErrorMessage(self._worker.CallOnewayProtoRpc(self._stop_capture_uri capture_request async_mode=self._async_mode))<block_end><def_stmt>get_camera_images self<arrow>Dict[Text Sequence[Any]]<block_start>"""Gets the latest camera images.
Camera images can only be obtained after self.start_capture() is called.
Returns:
A dictionary of camera frames, with the camera id as the key. Each camera
frame may contain multiple streams. For example, on a realsense camera we
may have "rgb" and "depth" streams, depending on the configuration.
"""<line_sep>get_frame_request=imagery_pb2.GetFrameRequest()<line_sep>frame_collection=imagery_pb2.CameraFrameCollection()<line_sep>fwassert.FwAssert.CheckErrorMessage(self._worker.CallRoundtripProtoRpc(self._get_frame_uri get_frame_request frame_collection self._rpc_timeout_sec async_mode=self._async_mode))<line_sep>images_by_camera={}<for_stmt>camera_frame frame_collection.frames<block_start>camera_id=camera_frame.camera_id<line_sep># In case we received multiple frames, we apppend them in the order
# received.
<if_stmt>camera_id<in>images_by_camera<block_start>images_by_camera[camera_id].append(self._convert_camera_frame_to_image_dict(camera_frame))<block_end><else_stmt><block_start>images_by_camera[camera_id]=[self._convert_camera_frame_to_image_dict(camera_frame)]<block_end><block_end><return>images_by_camera<block_end><block_end>
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
<import_from_stmt>graph.types ConstantInputParameters<import_from_stmt>utils.node_id NodeId<class_stmt>RescaleConstantMixin()<block_start>@classmethod<def_stmt>rescale_constant cls node:ConstantInputParameters scale qrecs dtype=<none><block_start>qrec=qrecs[NodeId(node)]<line_sep>qtype=qrec.out_qs[0]<if_stmt>(qtype.scale<eq>scale.astype(qtype.scale.dtype)<and>(dtype<is><none><or>dtype<eq>qtype.dtype))<block_start><return><block_end><if_stmt>node.qtype<block_start>node.value=node.dqvalue<line_sep>node.qtype=<none><block_end>qtype.scale=scale<if_stmt>dtype<block_start>qtype.dtype=dtype<block_end><block_end><block_end>
|
<import_stmt>glob<import_stmt>os<import_stmt>logging<import_from_stmt>. pattern<import_from_stmt>.import_resolver PyImportResolver<import_from_stmt>.dialect autodetect_dialects<import_from_stmt>.util join_path<def_stmt>_append_dict sdict key value<block_start><if_stmt>key<in>sdict<block_start>sdict[key].append(value)<block_end><else_stmt><block_start>sdict[key]=[value]<block_end><block_end><class_stmt>Workspace<block_start>"""Analysis workspace"""<def_stmt>__init__ self logger=<none># logger
<block_start>self.logger=logging<if>logger<is><none><else>logger<line_sep># states
self.pyimport_resolver=PyImportResolver()<line_sep>self.key2defs={}<line_sep>self.key2refs={}<line_sep>self.modpath2exports={}<line_sep>self._need_reload=<false><line_sep># information
self._root_path=<none><block_end><def_stmt>initialize self root_path# By default only update root/src, root/python, root/include
# can add configs later
<block_start>self.logger.info("root_path: %s" root_path)<line_sep>self._providers=autodetect_dialects(root_path self.pyimport_resolver self.logger)<line_sep>self._root_path=root_path<line_sep>self._reload()<block_end><def_stmt>_reload self<block_start>"""Reload workspace."""<line_sep>self.key2defs={}<line_sep>self.key2refs={}<line_sep>self.modpath2exports={}<line_sep>scan_dirs=[os.path.join(self._root_path "src") os.path.join(self._root_path "include") os.path.join(self._root_path "python")]<for_stmt>provider self._providers<block_start>scan_dirs<augadd>provider.get_additional_scan_dirs(self._root_path)<block_end><for_stmt>dirname scan_dirs<block_start>self.update_dir(dirname)<block_end>self._need_reload=<false><block_end><def_stmt>_sync_states self<block_start>"""Synchronize the workspace states."""<if_stmt>self._need_reload<block_start>self._reload()<block_end><block_end><def_stmt>init_pass self path source<block_start>"""Initialization pass"""<line_sep>mod_path=path[:-3]<if>path.endswith(".py")<else>path<line_sep>self.pyimport_resolver.update_doc(path source)<for_stmt>provider self._providers<block_start>provider.init_pass(path source)<block_end><block_end><def_stmt>update_dir self dirname<block_start>self.logger.info("Workspace.update_dir %s start" dirname)<line_sep># intialize pass
<for_stmt>path sorted(glob.glob(join_path(dirname "**/*.py") recursive=<true>))<block_start>self.init_pass(path open(path).readlines())<block_end># normal scans
<for_stmt>path sorted(glob.glob(join_path(dirname "**/*.py") recursive=<true>))<block_start>self.update_doc(path open(path).readlines())<block_end><for_stmt>path sorted(glob.glob(join_path(dirname "**/*.h") recursive=<true>))<block_start>self.update_doc(path open(path).readlines())<block_end><for_stmt>path sorted(glob.glob(join_path(dirname "**/*.cc") recursive=<true>))<block_start>self.update_doc(path open(path).readlines())<block_end><for_stmt>path sorted(glob.glob(join_path(dirname "**/*.cpp") recursive=<true>))<block_start>self.update_doc(path open(path).readlines())<block_end>self.logger.info("Workspace.update_dir %s finish" dirname)<block_end><def_stmt>update_doc self path source<block_start><for_stmt>provider self._providers<block_start><for_stmt>pt provider.extract(path source)<block_start>mod_path=path[:-3]<if>path.endswith(".py")<else>path<if_stmt>isinstance(pt pattern.Def)<block_start>_append_dict(self.key2defs pt.key pt)<block_end><elif_stmt>isinstance(pt pattern.Ref)<block_start>_append_dict(self.key2refs pt.key pt)<block_end><elif_stmt>isinstance(pt pattern.Export)<block_start>_append_dict(self.modpath2exports mod_path pt)<block_end><else_stmt><block_start>self.logger.warn("Ignore pattern %s, path=%s" pt path)<block_end><block_end><block_end>self.logger.debug("Workspace.update_doc %s" path)<block_end><def_stmt>find_defs self mod_path sym_name<block_start>"""Get definition given python mod path and symbol name"""<line_sep>self._sync_states()<line_sep>mod_path,var_name=self.pyimport_resolver.resolve(mod_path sym_name)<if_stmt>var_name<is><none><block_start><return>[]<block_end>export_list=self.modpath2exports.get(mod_path [])<for_stmt>item export_list<block_start>key=item.fvar2key(var_name)<if_stmt>key<in>self.key2defs<block_start><return>self.key2defs[key]<block_end><block_end><return>[]<block_end><def_stmt>_py_find_refs self key# Step 1: find python ffi module that import the related function
<block_start>var_targets=set()<line_sep>mod_targets={}<for_stmt>mod_path,exports self.modpath2exports.items()<block_start><for_stmt>item exports<block_start><if_stmt>key.startswith(item.key_prefix)<block_start>var_name=item.fkey2var(key)<line_sep>var_targets.add((mod_path var_name))<line_sep>mod_targets[mod_path]=var_name<block_end><block_end><block_end># Step 2: find modules that import the ffi modules
# construct search terms
search_map={}<for_stmt>mod_path,var_list self.pyimport_resolver._modpath2imports.items()<block_start>search_term=[]<for_stmt>var_name var_list<block_start>new_path,new_var=self.pyimport_resolver.resolve(mod_path var_name)<if_stmt>(new_path new_var)<in>var_targets<block_start>search_term.append(var_name)<block_end><if_stmt>new_var<is><none><and>new_path<in>mod_targets<block_start>search_term.append(var_name+"."+mod_targets[new_path])<block_end><block_end><if_stmt>search_term<block_start>search_map[mod_path]=search_term<block_end><block_end><for_stmt>mod_path,var_name mod_targets.items()<block_start>search_map[mod_path]=[var_name]<block_end># Step 3: search the related files
results=[]<for_stmt>mod_path,terms search_map.items()<block_start>path=mod_path<if>mod_path.endswith(".py")<else>mod_path+".py"<if_stmt>os.path.isfile(path)<block_start>res=pattern.search_symbol(open(path).read() terms)<for_stmt>x res<block_start>results.append(pattern.Ref(key=key path=path range=x))<block_end><block_end><block_end><return>results<block_end><def_stmt>find_refs self key<block_start>self._sync_states()<line_sep>res=self._py_find_refs(key)<line_sep>res<augadd>self.key2refs.get(key [])<line_sep><return>res<block_end><def_stmt>extract_symbol self path source pos<block_start><for_stmt>pt self._providers<block_start>res=pt.extract_symbol(path source pos)<if_stmt>res<block_start><return>res<block_end><block_end><return>pattern.extract_symbol(source pos)<block_end><block_end>
|
# encoding: utf-8
# Copyright (c) <NAME> <<EMAIL>>
# Distributed under the terms of the Modified BSD License.
<import_from_future_stmt> absolute_import print_function unicode_literals<import_stmt>codecs<import_stmt>glob<import_stmt>os<import_stmt>tempfile<import_stmt>inspect<import_stmt>unittest<import_stmt>re<import_from_stmt>knitpy.knitpy Knitpy<import_from_stmt>knitpy.py3compat PY3<def_stmt>_add_test_cases cls foldername<block_start>""" Adds one testcase for each input file in the 'test_dir'
You have to build a TestCase class, with a _output_test(self, input_file, output_file)
method and a tests_dir property, which is simply the name of the dir where
the test cases are located.
The inputs for the test cases have to have a file ending "*_input.pymd" and the outputs have
to end in "*_output.md".
The `_output_test` method has to convert input and then test for equality with the output.
The generated test methods will be called `test_something` for `something_input.pymd`.
"""<line_sep># Put them together to make a list of new test functions.
# One test function for each input file
tests_dir=os.path.join(os.path.dirname(inspect.getfile(cls)) foldername)<line_sep>test_cases_glob=os.path.join(tests_dir "*.pymd")<line_sep>testcases=glob.glob(test_cases_glob)<line_sep>function=cls._output_test<for_stmt>input_file testcases# remove ".pymd" from filename
<block_start>basename=os.path.splitext(os.path.basename(input_file))[0]<line_sep>output_file=os.path.join(tests_dir basename+".md")<line_sep># the complicated syntax is needed to get the individual input files into the method...
# http://math.andrej.com/2009/04/09/pythons-lambda-is-broken/comment-page-1/
<def_stmt>test_function self input_file=input_file output_file=output_file<block_start>function(self input_file output_file)<block_end>name="test_%s_%s"%(foldername basename)<line_sep>test_function.__name__=str(name)<line_sep>setattr(cls name test_function)<block_end><block_end><class_stmt>AbstractOutputTestCase(unittest.TestCase)#<ipython-input-2-fb4ced135814>
<block_start>_re_ipython_id=re.compile(r"<ipython-input-[0-9]+-[a-z0-9]+>")<def_stmt>setUp self<block_start>self.maxDiff=<none><line_sep>self.knitpy=Knitpy()<block_end><def_stmt>_output_test self input_file output_file<block_start><with_stmt>codecs.open(input_file 'r' 'UTF-8')<as>f<block_start>input=f.read()<block_end># some exceptions are different on py2 and py3, so add a way to make both happy...
# the version which was used to develop the tests (currently py2) should stay '.md' and
# the exception should become '.md_pyX'
<if_stmt>PY3<block_start><if_stmt>os.path.exists(output_file+"_py3")<block_start>output_file=output_file+"_py3"<block_end><block_end><else_stmt><block_start><if_stmt>os.path.exists(output_file+"_py2")<block_start>output_file=output_file+"_py2"<block_end><block_end>output=self.knitpy._knit(input tempfile.gettempdir())<if_stmt><not>os.path.exists(output_file)<block_start>_file=output_file+".off"<with_stmt>codecs.open(_file 'w' 'UTF-8')<as>f<block_start>output=self._re_ipython_id.sub("<ipython-input>" output)<line_sep>output=output.replace(os.linesep "\n")<line_sep>f.write(output)<block_end>self.fail("Output does not exist, created one as %s. Remove '.off' to enable it.")<block_end><with_stmt>codecs.open(output_file 'r' 'UTF-8')<as>f<block_start>exp=f.read()<block_end>self.assert_equal_output(exp output filename=output_file)<block_end><def_stmt>assert_equal_output self expected received filename=<none># output written to a file does not seem to have os.linesep
# handle everything here by replacing the os linesep by a simple \n
<block_start>expected=expected.replace(os.linesep "\n").rstrip('\n')<line_sep>received=received.replace(os.linesep "\n").rstrip('\n')<line_sep># in errors, there is a unique id like <ipython-input-2-fb4ced135814>
received=self._re_ipython_id.sub("<ipython-input>" received)<line_sep># this is a hardcoded fix for py3, where there are quotes around the module:
received=received.replace("'NoneExistingModule'" "NoneExistingModule")<if_stmt>filename<and>expected<ne>received<block_start>_file=filename+".received"<with_stmt>codecs.open(_file 'w' 'UTF-8')<as>f<block_start>f.write(received)<block_end><block_end>self.assertEqual(expected received)<block_end><block_end>
|
<import_stmt>bpy<line_sep>bpy.context.camera.sensor_width=23.4<line_sep>bpy.context.camera.sensor_height=15.6<line_sep>bpy.context.camera.sensor_fit='HORIZONTAL'<line_sep>
|
#This code comes from: https://github.com/becomequantum/kryon
<import_from_stmt>PIL Image ImageDraw ImageFont<import_stmt>numpy<as>np<line_sep>#This code is only about making the demo animation.
VideoSize=(1280 720)<line_sep>DemoImageSize=(48 36)<line_sep>标题位置=(60 16)<line_sep>注释1位置=(1000 76)<line_sep>网格位置=(32 76)<line_sep>比例=17<line_sep>网格颜色=(230 230 230)<line_sep>网三位置=(网格位置[0]+比例<times>DemoImageSize[0]+比例<times>2 网格位置[1])<line_sep>网三比例=比例<times>2<line_sep>坐标位置=(网三位置[0] 网三位置[1]+网三比例<times>3+5)<line_sep>注释2位置=(坐标位置[0] 坐标位置[1]+比例+18)<line_sep>副标题位置=(注释2位置[0] 注释2位置[1]+350)<line_sep>UnitTime=0.1<line_sep>ScanTime=0.1<line_sep>FinishTime=0.1<line_sep>frame_list=[]<def_stmt>微软雅黑 Size<block_start><return>ImageFont.truetype("msyh.ttf" Size)<block_end><def_stmt>方框 x y 位置 比例<block_start>左上=(位置[0]+x<times>比例 位置[1]+y<times>比例)<line_sep>右下=(位置[0]+x<times>比例+比例 位置[1]+y<times>比例+比例)<line_sep><return>[左上 右下]<block_end><def_stmt>小方框 x y 位置 比例<block_start>左上=(位置[0]+x<times>比例+2 位置[1]+y<times>比例+2)<line_sep>右下=(位置[0]+x<times>比例+比例-2 位置[1]+y<times>比例+比例-2)<line_sep><return>[左上 右下]<block_end><def_stmt>方块 x y 位置 比例<block_start>左上=(位置[0]+x<times>比例+1 位置[1]+y<times>比例+1)<line_sep>右下=(位置[0]+x<times>比例+比例-1 位置[1]+y<times>比例+比例-1)<line_sep><return>[左上 右下]<block_end><def_stmt>完成框 ShapeInfo<block_start>左上=(网格位置[0]+ShapeInfo[2][0]<times>比例-1 网格位置[1]+ShapeInfo[2][1]<times>比例-1)<line_sep>右下=(网格位置[0]+ShapeInfo[1][0]<times>比例+比例+1 网格位置[1]+ShapeInfo[1][1]<times>比例+比例+1)<line_sep><return>[左上 右下]<block_end><def_stmt>反色 color<block_start>rcolor=(255-color[0] 255-color[1] 255-color[2])<line_sep><return>rcolor<block_end><def_stmt>InitBackGround ExampleImage Title subtitle textcolor=(0 162 232) subtitlecolor="orange" BgColor=(255 255 255) FPGA=<false><block_start>back_ground_image=Image.new("RGB" VideoSize BgColor)<line_sep>画=ImageDraw.Draw(back_ground_image)<line_sep>画.text(标题位置 Title fill=textcolor font=微软雅黑(30))<line_sep>画.text(副标题位置 subtitle fill=subtitlecolor font=微软雅黑(25))<for_stmt>y range(DemoImageSize[1])<block_start><for_stmt>x range(DemoImageSize[0])<block_start>画.rectangle(方框(x y 网格位置 比例) outline=网格颜色)#draw the big background grid
<if_stmt><not>(ExampleImage[y x 0]<eq>ExampleImage[y x 1]<eq>ExampleImage[y x 0]<eq>255)<block_start>画.rectangle(方块(x y 网格位置 比例) fill="black")#draw the black pixels from the example image
ExampleImage[y x]=[0 0 0]#any pixel that is not white becomes black
<block_end><if_stmt>x<le>2<and>y<le>2<block_start>画.rectangle(方框(x y 网三位置 网三比例) outline=网格颜色)#draw the 3x3 neighborhood grid on the right
<if_stmt>FPGA<and>(y<eq>1<or>(y<eq>2<and>x<eq>0))<block_start>画.rectangle(方框(x y-1 网三位置 网三比例) outline="blue")<block_end><block_end><block_end><block_end>画.rectangle(方框(1 1 网三位置 网三比例) outline="red")<line_sep><return>back_ground_image<block_end><def_stmt>AddClip bg_image x y Neighbourhood3x3 LabelColor=<none> diff=<false> duration=UnitTime Shape_info=<none> 注释1=" " 注释2=" "<block_start>标记=ImageDraw.Draw(bg_image)<if_stmt>LabelColor<ne><none><block_start>标记.rectangle(方块(x y 网格位置 比例) fill=LabelColor outline=<none>)#draw the label color block
<block_end><if_stmt>diff#when the neighborhood contains two different labels
<block_start>标记.rectangle(小方框(x y 网格位置 比例) outline=反色(LabelColor))<block_end>temp_image=bg_image.copy()<line_sep>画=ImageDraw.Draw(temp_image)<if_stmt>Shape_info<ne><none><block_start>标记.rectangle(完成框(Shape_info) outline="red")<line_sep>画.rectangle(完成框(Shape_info) outline="red")<block_end>画.rectangle(方框(x y 网格位置 比例) outline="red")#draw the small red box
画.text(注释1位置 注释1 fill="purple" font=微软雅黑(25))<line_sep>画.text(注释2位置 注释2 fill=LabelColor<if>(LabelColor<ne><none>)<else>"purple" font=微软雅黑(25))<line_sep>画.text(坐标位置 str((x y)) fill="black" font=微软雅黑(25))<for_stmt>y range(3)<block_start><for_stmt>x range(3)<block_start>画.rectangle(方块(x y 网三位置 网三比例) fill=tuple(Neighbourhood3x3[y x]))<block_end><block_end>[frame_list.append(np.array(temp_image))<for>n range(int(duration/UnitTime))]<block_end>
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
<import_stmt>unittest<import_from_stmt>telemetry decorators<class_stmt>Foo(object)<block_start><pass><block_end><def_stmt>CreateFooUncached _<block_start><return>Foo()<block_end>@decorators.Cache<def_stmt>CreateFooCached _<block_start><return>Foo()<block_end><class_stmt>DecoratorsUnitTest(unittest.TestCase)<block_start><def_stmt>testCacheDecorator self<block_start>self.assertNotEquals(CreateFooUncached(1) CreateFooUncached(2))<line_sep>self.assertNotEquals(CreateFooCached(1) CreateFooCached(2))<line_sep>self.assertNotEquals(CreateFooUncached(1) CreateFooUncached(1))<line_sep>self.assertEquals(CreateFooCached(1) CreateFooCached(1))<block_end><block_end>
|